author    Linus Torvalds <torvalds@linux-foundation.org>    2021-08-30 12:57:10 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2021-08-30 12:57:10 -0700
commit    44a7d4441181d0f2d622dc9bb512d7f5ca13f768 (patch)
tree      d7315b0896d47ef9e1d9ef3ebfbcb4b6f0a96d10 /drivers
parent    4ca4256453effb885c1688633676682529593f82 (diff)
parent    6ae51ffe5e768d9e25a7f4298e2e7a058472bcc3 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Algorithms:
   - Add AES-NI/AVX/x86_64 implementation of SM4.

  Drivers:
   - Add Arm SMCCC TRNG based driver"

[ And obviously a lot of random fixes and updates - Linus ]

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (84 commits)
  crypto: sha512 - remove imaginary and mystifying clearing of variables
  crypto: aesni - xts_crypt() return if walk.nbytes is 0
  padata: Remove repeated verbose license text
  crypto: ccp - Add support for new CCP/PSP device ID
  crypto: x86/sm4 - add AES-NI/AVX2/x86_64 implementation
  crypto: x86/sm4 - export reusable AESNI/AVX functions
  crypto: rmd320 - remove rmd320 in Makefile
  crypto: skcipher - in_irq() cleanup
  crypto: hisilicon - check _PS0 and _PR0 method
  crypto: hisilicon - change parameter passing of debugfs function
  crypto: hisilicon - support runtime PM for accelerator device
  crypto: hisilicon - add runtime PM ops
  crypto: hisilicon - using 'debugfs_create_file' instead of 'debugfs_create_regset32'
  crypto: tcrypt - add GCM/CCM mode test for SM4 algorithm
  crypto: testmgr - Add GCM/CCM mode test of SM4 algorithm
  crypto: tcrypt - Fix missing return value check
  crypto: hisilicon/sec - modify the hardware endian configuration
  crypto: hisilicon/sec - fix the abnormal exiting process
  crypto: qat - store vf.compatible flag
  crypto: qat - do not export adf_iov_putmsg()
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hw_random/Kconfig | 14
-rw-r--r--  drivers/char/hw_random/Makefile | 1
-rw-r--r--  drivers/char/hw_random/amd-rng.c | 8
-rw-r--r--  drivers/char/hw_random/arm_smccc_trng.c | 123
-rw-r--r--  drivers/char/hw_random/geode-rng.c | 8
-rw-r--r--  drivers/char/hw_random/intel-rng.c | 8
-rw-r--r--  drivers/char/hw_random/via-rng.c | 8
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c | 9
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c | 3
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c | 9
-rw-r--r--  drivers/crypto/atmel-aes.c | 154
-rw-r--r--  drivers/crypto/atmel-tdes.c | 66
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 49
-rw-r--r--  drivers/crypto/ccp/sp-pci.c | 19
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c | 123
-rw-r--r--  drivers/crypto/hisilicon/qm.c | 430
-rw-r--r--  drivers/crypto/hisilicon/qm.h | 8
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec.h | 5
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c | 138
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_main.c | 83
-rw-r--r--  drivers/crypto/mxs-dcp.c | 81
-rw-r--r--  drivers/crypto/omap-aes.c | 8
-rw-r--r--  drivers/crypto/omap-crypto.c | 2
-rw-r--r--  drivers/crypto/omap-des.c | 8
-rw-r--r--  drivers/crypto/omap-sham.c | 68
-rw-r--r--  drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c | 8
-rw-r--r--  drivers/crypto/qat/qat_4xxx/adf_drv.c | 14
-rw-r--r--  drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 19
-rw-r--r--  drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h | 1
-rw-r--r--  drivers/crypto/qat/qat_c3xxx/adf_drv.c | 21
-rw-r--r--  drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c | 14
-rw-r--r--  drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h | 1
-rw-r--r--  drivers/crypto/qat/qat_c3xxxvf/adf_drv.c | 16
-rw-r--r--  drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c | 19
-rw-r--r--  drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h | 1
-rw-r--r--  drivers/crypto/qat/qat_c62x/adf_drv.c | 21
-rw-r--r--  drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c | 14
-rw-r--r--  drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h | 1
-rw-r--r--  drivers/crypto/qat/qat_c62xvf/adf_drv.c | 16
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_devices.h | 8
-rw-r--r--  drivers/crypto/qat/qat_common/adf_aer.c | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_common_drv.h | 21
-rw-r--r--  drivers/crypto/qat/qat_common/adf_init.c | 13
-rw-r--r--  drivers/crypto/qat/qat_common/adf_isr.c | 42
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pf2vf_msg.c | 78
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pf2vf_msg.h | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_sriov.c | 8
-rw-r--r--  drivers/crypto/qat/qat_common/adf_vf2pf_msg.c | 12
-rw-r--r--  drivers/crypto/qat/qat_common/adf_vf_isr.c | 64
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 19
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | 1
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 21
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c | 14
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h | 1
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | 16
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_core.c | 4
-rw-r--r--  drivers/firmware/smccc/smccc.c | 17
57 files changed, 1341 insertions(+), 601 deletions(-)
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 3f166c8a4099..239eca4d6805 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -524,6 +524,20 @@ config HW_RANDOM_XIPHERA
To compile this driver as a module, choose M here: the
module will be called xiphera-trng.
+config HW_RANDOM_ARM_SMCCC_TRNG
+ tristate "Arm SMCCC TRNG firmware interface support"
+ depends on HAVE_ARM_SMCCC_DISCOVERY
+ default HW_RANDOM
+ help
+ Say 'Y' to enable the True Random Number Generator driver using
+ the Arm SMCCC TRNG firmware interface. This reads entropy from
+ higher exception levels (firmware, hypervisor). Uses SMCCC for
+ communicating with the firmware:
+ https://developer.arm.com/documentation/den0098/latest/
+
+ To compile this driver as a module, choose M here: the
+ module will be called arm_smccc_trng.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 8933fada74f2..a5a1c765a394 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -45,3 +45,4 @@ obj-$(CONFIG_HW_RANDOM_OPTEE) += optee-rng.o
obj-$(CONFIG_HW_RANDOM_NPCM) += npcm-rng.o
obj-$(CONFIG_HW_RANDOM_CCTRNG) += cctrng.o
obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o
+obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index d8d4ef5214a1..c22d4184bb61 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -124,7 +124,7 @@ static struct hwrng amd_rng = {
.read = amd_rng_read,
};
-static int __init mod_init(void)
+static int __init amd_rng_mod_init(void)
{
int err;
struct pci_dev *pdev = NULL;
@@ -188,7 +188,7 @@ out:
return err;
}
-static void __exit mod_exit(void)
+static void __exit amd_rng_mod_exit(void)
{
struct amd768_priv *priv;
@@ -203,8 +203,8 @@ static void __exit mod_exit(void)
kfree(priv);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(amd_rng_mod_init);
+module_exit(amd_rng_mod_exit);
MODULE_AUTHOR("The Linux Kernel team");
MODULE_DESCRIPTION("H/W RNG driver for AMD chipsets");
diff --git a/drivers/char/hw_random/arm_smccc_trng.c b/drivers/char/hw_random/arm_smccc_trng.c
new file mode 100644
index 000000000000..b24ac39a903b
--- /dev/null
+++ b/drivers/char/hw_random/arm_smccc_trng.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Randomness driver for the ARM SMCCC TRNG Firmware Interface
+ * https://developer.arm.com/documentation/den0098/latest/
+ *
+ * Copyright (C) 2020 Arm Ltd.
+ *
+ * The ARM TRNG firmware interface specifies a protocol to read entropy
+ * from a higher exception level, to abstract from any machine specific
+ * implementations and allow easier use in hypervisors.
+ *
+ * The firmware interface is realised using the SMCCC specification.
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/hw_random.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/arm-smccc.h>
+
+#ifdef CONFIG_ARM64
+#define ARM_SMCCC_TRNG_RND ARM_SMCCC_TRNG_RND64
+#define MAX_BITS_PER_CALL (3 * 64UL)
+#else
+#define ARM_SMCCC_TRNG_RND ARM_SMCCC_TRNG_RND32
+#define MAX_BITS_PER_CALL (3 * 32UL)
+#endif
+
+/* We don't want to allow the firmware to stall us forever. */
+#define SMCCC_TRNG_MAX_TRIES 20
+
+#define SMCCC_RET_TRNG_INVALID_PARAMETER -2
+#define SMCCC_RET_TRNG_NO_ENTROPY -3
+
+static int copy_from_registers(char *buf, struct arm_smccc_res *res,
+ size_t bytes)
+{
+ unsigned int chunk, copied;
+
+ if (bytes == 0)
+ return 0;
+
+ chunk = min(bytes, sizeof(long));
+ memcpy(buf, &res->a3, chunk);
+ copied = chunk;
+ if (copied >= bytes)
+ return copied;
+
+ chunk = min((bytes - copied), sizeof(long));
+ memcpy(&buf[copied], &res->a2, chunk);
+ copied += chunk;
+ if (copied >= bytes)
+ return copied;
+
+ chunk = min((bytes - copied), sizeof(long));
+ memcpy(&buf[copied], &res->a1, chunk);
+
+ return copied + chunk;
+}
+
+static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct arm_smccc_res res;
+ u8 *buf = data;
+ unsigned int copied = 0;
+ int tries = 0;
+
+ while (copied < max) {
+ size_t bits = min_t(size_t, (max - copied) * BITS_PER_BYTE,
+ MAX_BITS_PER_CALL);
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND, bits, &res);
+ if ((int)res.a0 < 0)
+ return (int)res.a0;
+
+ switch ((int)res.a0) {
+ case SMCCC_RET_SUCCESS:
+ copied += copy_from_registers(buf + copied, &res,
+ bits / BITS_PER_BYTE);
+ tries = 0;
+ break;
+ case SMCCC_RET_TRNG_NO_ENTROPY:
+ if (!wait)
+ return copied;
+ tries++;
+ if (tries >= SMCCC_TRNG_MAX_TRIES)
+ return copied;
+ cond_resched();
+ break;
+ }
+ }
+
+ return copied;
+}
+
+static int smccc_trng_probe(struct platform_device *pdev)
+{
+ struct hwrng *trng;
+
+ trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return -ENOMEM;
+
+ trng->name = "smccc_trng";
+ trng->read = smccc_trng_read;
+
+ platform_set_drvdata(pdev, trng);
+
+ return devm_hwrng_register(&pdev->dev, trng);
+}
+
+static struct platform_driver smccc_trng_driver = {
+ .driver = {
+ .name = "smccc_trng",
+ },
+ .probe = smccc_trng_probe,
+};
+module_platform_driver(smccc_trng_driver);
+
+MODULE_ALIAS("platform:smccc_trng");
+MODULE_AUTHOR("Andre Przywara");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index e1d421a36a13..138ce434f86b 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -83,7 +83,7 @@ static struct hwrng geode_rng = {
};
-static int __init mod_init(void)
+static int __init geode_rng_init(void)
{
int err = -ENODEV;
struct pci_dev *pdev = NULL;
@@ -124,7 +124,7 @@ err_unmap:
goto out;
}
-static void __exit mod_exit(void)
+static void __exit geode_rng_exit(void)
{
void __iomem *mem = (void __iomem *)geode_rng.priv;
@@ -132,8 +132,8 @@ static void __exit mod_exit(void)
iounmap(mem);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(geode_rng_init);
+module_exit(geode_rng_exit);
MODULE_DESCRIPTION("H/W RNG driver for AMD Geode LX CPUs");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index d740b8814bf3..7b171cb3b825 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -325,7 +325,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
}
-static int __init mod_init(void)
+static int __init intel_rng_mod_init(void)
{
int err = -ENODEV;
int i;
@@ -403,7 +403,7 @@ out:
}
-static void __exit mod_exit(void)
+static void __exit intel_rng_mod_exit(void)
{
void __iomem *mem = (void __iomem *)intel_rng.priv;
@@ -411,8 +411,8 @@ static void __exit mod_exit(void)
iounmap(mem);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(intel_rng_mod_init);
+module_exit(intel_rng_mod_exit);
MODULE_DESCRIPTION("H/W RNG driver for Intel chipsets");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 39943bc3651a..7444cc146e86 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -192,7 +192,7 @@ static struct hwrng via_rng = {
};
-static int __init mod_init(void)
+static int __init via_rng_mod_init(void)
{
int err;
@@ -209,13 +209,13 @@ static int __init mod_init(void)
out:
return err;
}
-module_init(mod_init);
+module_init(via_rng_mod_init);
-static void __exit mod_exit(void)
+static void __exit via_rng_mod_exit(void)
{
hwrng_unregister(&via_rng);
}
-module_exit(mod_exit);
+module_exit(via_rng_mod_exit);
static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = {
X86_MATCH_FEATURE(X86_FEATURE_XSTORE, NULL),
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
index cd1baee424a1..b3a9bbfb8831 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
@@ -26,8 +26,7 @@ void sun8i_ce_prng_exit(struct crypto_tfm *tfm)
{
struct sun8i_ce_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
- memzero_explicit(ctx->seed, ctx->slen);
- kfree(ctx->seed);
+ kfree_sensitive(ctx->seed);
ctx->seed = NULL;
ctx->slen = 0;
}
@@ -38,8 +37,7 @@ int sun8i_ce_prng_seed(struct crypto_rng *tfm, const u8 *seed,
struct sun8i_ce_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
if (ctx->seed && ctx->slen != slen) {
- memzero_explicit(ctx->seed, ctx->slen);
- kfree(ctx->seed);
+ kfree_sensitive(ctx->seed);
ctx->slen = 0;
ctx->seed = NULL;
}
@@ -157,9 +155,8 @@ err_dst:
memcpy(dst, d, dlen);
memcpy(ctx->seed, d + dlen, ctx->slen);
}
- memzero_explicit(d, todo);
err_iv:
- kfree(d);
+ kfree_sensitive(d);
err_mem:
return err;
}
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
index 5b7af4498bd5..19cd2e52f89d 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
@@ -95,9 +95,8 @@ err_pm:
memcpy(data, d, max);
err = max;
}
- memzero_explicit(d, todo);
err_dst:
- kfree(d);
+ kfree_sensitive(d);
return err;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
index 3191527928e4..246a6782674c 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
@@ -20,8 +20,7 @@ int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed,
struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
if (ctx->seed && ctx->slen != slen) {
- memzero_explicit(ctx->seed, ctx->slen);
- kfree(ctx->seed);
+ kfree_sensitive(ctx->seed);
ctx->slen = 0;
ctx->seed = NULL;
}
@@ -48,8 +47,7 @@ void sun8i_ss_prng_exit(struct crypto_tfm *tfm)
{
struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
- memzero_explicit(ctx->seed, ctx->slen);
- kfree(ctx->seed);
+ kfree_sensitive(ctx->seed);
ctx->seed = NULL;
ctx->slen = 0;
}
@@ -167,9 +165,8 @@ err_iv:
/* Update seed */
memcpy(ctx->seed, d + dlen, ctx->slen);
}
- memzero_explicit(d, todo);
err_free:
- kfree(d);
+ kfree_sensitive(d);
return err;
}
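
The kfree_sensitive() conversions in these three Allwinner files are behavior-preserving: the helper zeroes an allocation before freeing it, so each open-coded memzero_explicit() + kfree() pair collapses into one call. A rough sketch of the semantics (illustration only; the real helper takes just the pointer, handles NULL, and derives the length itself):

/* Illustration of what the call replaces; kfree_sensitive() proper
 * accepts a single pointer and looks up the allocation size itself. */
static void zero_then_free(void *p, size_t len)
{
	if (p)
		memzero_explicit(p, len);	/* zeroing the compiler cannot elide */
	kfree(p);
}
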
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index b1d286004295..9391ccc03382 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -143,6 +143,7 @@ struct atmel_aes_xts_ctx {
struct atmel_aes_base_ctx base;
u32 key2[AES_KEYSIZE_256 / sizeof(u32)];
+ struct crypto_skcipher *fallback_tfm;
};
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
@@ -155,6 +156,7 @@ struct atmel_aes_authenc_ctx {
struct atmel_aes_reqctx {
unsigned long mode;
u8 lastc[AES_BLOCK_SIZE];
+ struct skcipher_request fallback_req;
};
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
@@ -418,24 +420,15 @@ static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
return len ? block_size - len : 0;
}
-static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
+static struct atmel_aes_dev *atmel_aes_dev_alloc(struct atmel_aes_base_ctx *ctx)
{
- struct atmel_aes_dev *aes_dd = NULL;
- struct atmel_aes_dev *tmp;
+ struct atmel_aes_dev *aes_dd;
spin_lock_bh(&atmel_aes.lock);
- if (!ctx->dd) {
- list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
- aes_dd = tmp;
- break;
- }
- ctx->dd = aes_dd;
- } else {
- aes_dd = ctx->dd;
- }
-
+ /* One AES IP per SoC. */
+ aes_dd = list_first_entry_or_null(&atmel_aes.dev_list,
+ struct atmel_aes_dev, list);
spin_unlock_bh(&atmel_aes.lock);
-
return aes_dd;
}
@@ -967,7 +960,6 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
ctx = crypto_tfm_ctx(areq->tfm);
dd->areq = areq;
- dd->ctx = ctx;
start_async = (areq != new_areq);
dd->is_async = start_async;
@@ -1083,12 +1075,48 @@ static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
return atmel_aes_ctr_transfer(dd);
}
+static int atmel_aes_xts_fallback(struct skcipher_request *req, bool enc)
+{
+ struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
+ struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
+
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
+ req->base.complete, req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
+ req->cryptlen, req->iv);
+
+ return enc ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+ crypto_skcipher_decrypt(&rctx->fallback_req);
+}
+
static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct atmel_aes_reqctx *rctx;
- struct atmel_aes_dev *dd;
+ u32 opmode = mode & AES_FLAGS_OPMODE_MASK;
+
+ if (opmode == AES_FLAGS_XTS) {
+ if (req->cryptlen < XTS_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(req->cryptlen, XTS_BLOCK_SIZE))
+ return atmel_aes_xts_fallback(req,
+ mode & AES_FLAGS_ENCRYPT);
+ }
+
+ /*
+ * ECB, CBC, CFB, OFB or CTR modes require the plaintext and ciphertext
+ * to have a positive integer length.
+ */
+ if (!req->cryptlen && opmode != AES_FLAGS_XTS)
+ return 0;
+
+ if ((opmode == AES_FLAGS_ECB || opmode == AES_FLAGS_CBC) &&
+ !IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(skcipher)))
+ return -EINVAL;
switch (mode & AES_FLAGS_OPMODE_MASK) {
case AES_FLAGS_CFB8:
@@ -1113,14 +1141,10 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
}
ctx->is_aead = false;
- dd = atmel_aes_find_dev(ctx);
- if (!dd)
- return -ENODEV;
-
rctx = skcipher_request_ctx(req);
rctx->mode = mode;
- if ((mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB &&
+ if (opmode != AES_FLAGS_ECB &&
!(mode & AES_FLAGS_ENCRYPT) && req->src == req->dst) {
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
@@ -1130,7 +1154,7 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
ivsize, 0);
}
- return atmel_aes_handle_queue(dd, &req->base);
+ return atmel_aes_handle_queue(ctx->dd, &req->base);
}
static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
@@ -1242,8 +1266,15 @@ static int atmel_aes_ctr_decrypt(struct skcipher_request *req)
static int atmel_aes_init_tfm(struct crypto_skcipher *tfm)
{
struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct atmel_aes_dev *dd;
+
+ dd = atmel_aes_dev_alloc(&ctx->base);
+ if (!dd)
+ return -ENODEV;
crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
+ ctx->base.dd = dd;
+ ctx->base.dd->ctx = &ctx->base;
ctx->base.start = atmel_aes_start;
return 0;
@@ -1252,8 +1283,15 @@ static int atmel_aes_init_tfm(struct crypto_skcipher *tfm)
static int atmel_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
{
struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct atmel_aes_dev *dd;
+
+ dd = atmel_aes_dev_alloc(&ctx->base);
+ if (!dd)
+ return -ENODEV;
crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
+ ctx->base.dd = dd;
+ ctx->base.dd->ctx = &ctx->base;
ctx->base.start = atmel_aes_ctr_start;
return 0;
@@ -1290,7 +1328,7 @@ static struct skcipher_alg aes_algs[] = {
{
.base.cra_name = "ofb(aes)",
.base.cra_driver_name = "atmel-ofb-aes",
- .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
.init = atmel_aes_init_tfm,
@@ -1691,20 +1729,15 @@ static int atmel_aes_gcm_crypt(struct aead_request *req,
{
struct atmel_aes_base_ctx *ctx;
struct atmel_aes_reqctx *rctx;
- struct atmel_aes_dev *dd;
ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
ctx->block_size = AES_BLOCK_SIZE;
ctx->is_aead = true;
- dd = atmel_aes_find_dev(ctx);
- if (!dd)
- return -ENODEV;
-
rctx = aead_request_ctx(req);
rctx->mode = AES_FLAGS_GCM | mode;
- return atmel_aes_handle_queue(dd, &req->base);
+ return atmel_aes_handle_queue(ctx->dd, &req->base);
}
static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
@@ -1742,8 +1775,15 @@ static int atmel_aes_gcm_decrypt(struct aead_request *req)
static int atmel_aes_gcm_init(struct crypto_aead *tfm)
{
struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
+ struct atmel_aes_dev *dd;
+
+ dd = atmel_aes_dev_alloc(&ctx->base);
+ if (!dd)
+ return -ENODEV;
crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
+ ctx->base.dd = dd;
+ ctx->base.dd->ctx = &ctx->base;
ctx->base.start = atmel_aes_gcm_start;
return 0;
@@ -1819,12 +1859,8 @@ static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
* the order of the ciphered tweak bytes need to be reversed before
* writing them into the ODATARx registers.
*/
- for (i = 0; i < AES_BLOCK_SIZE/2; ++i) {
- u8 tmp = tweak_bytes[AES_BLOCK_SIZE - 1 - i];
-
- tweak_bytes[AES_BLOCK_SIZE - 1 - i] = tweak_bytes[i];
- tweak_bytes[i] = tmp;
- }
+ for (i = 0; i < AES_BLOCK_SIZE/2; ++i)
+ swap(tweak_bytes[i], tweak_bytes[AES_BLOCK_SIZE - 1 - i]);
/* Process the data. */
atmel_aes_write_ctrl(dd, use_dma, NULL);
@@ -1849,6 +1885,13 @@ static int atmel_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (err)
return err;
+ crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+ if (err)
+ return err;
+
memcpy(ctx->base.key, key, keylen/2);
memcpy(ctx->key2, key + keylen/2, keylen/2);
ctx->base.keylen = keylen/2;
@@ -1869,18 +1912,40 @@ static int atmel_aes_xts_decrypt(struct skcipher_request *req)
static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
{
struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct atmel_aes_dev *dd;
+ const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
- crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
+ dd = atmel_aes_dev_alloc(&ctx->base);
+ if (!dd)
+ return -ENODEV;
+
+ ctx->fallback_tfm = crypto_alloc_skcipher(tfm_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback_tfm))
+ return PTR_ERR(ctx->fallback_tfm);
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx) +
+ crypto_skcipher_reqsize(ctx->fallback_tfm));
+ ctx->base.dd = dd;
+ ctx->base.dd->ctx = &ctx->base;
ctx->base.start = atmel_aes_xts_start;
return 0;
}
+static void atmel_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_skcipher(ctx->fallback_tfm);
+}
+
static struct skcipher_alg aes_xts_alg = {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "atmel-xts-aes",
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
@@ -1889,6 +1954,7 @@ static struct skcipher_alg aes_xts_alg = {
.encrypt = atmel_aes_xts_encrypt,
.decrypt = atmel_aes_xts_decrypt,
.init = atmel_aes_xts_init_tfm,
+ .exit = atmel_aes_xts_exit_tfm,
};
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
@@ -2075,6 +2141,11 @@ static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
{
struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
unsigned int auth_reqsize = atmel_sha_authenc_get_reqsize();
+ struct atmel_aes_dev *dd;
+
+ dd = atmel_aes_dev_alloc(&ctx->base);
+ if (!dd)
+ return -ENODEV;
ctx->auth = atmel_sha_authenc_spawn(auth_mode);
if (IS_ERR(ctx->auth))
@@ -2082,6 +2153,8 @@ static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
crypto_aead_set_reqsize(tfm, (sizeof(struct atmel_aes_authenc_reqctx) +
auth_reqsize));
+ ctx->base.dd = dd;
+ ctx->base.dd->ctx = &ctx->base;
ctx->base.start = atmel_aes_authenc_start;
return 0;
@@ -2127,7 +2200,6 @@ static int atmel_aes_authenc_crypt(struct aead_request *req,
struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
u32 authsize = crypto_aead_authsize(tfm);
bool enc = (mode & AES_FLAGS_ENCRYPT);
- struct atmel_aes_dev *dd;
/* Compute text length. */
if (!enc && req->cryptlen < authsize)
@@ -2146,11 +2218,7 @@ static int atmel_aes_authenc_crypt(struct aead_request *req,
ctx->block_size = AES_BLOCK_SIZE;
ctx->is_aead = true;
- dd = atmel_aes_find_dev(ctx);
- if (!dd)
- return -ENODEV;
-
- return atmel_aes_handle_queue(dd, &req->base);
+ return atmel_aes_handle_queue(ctx->dd, &req->base);
}
static int atmel_aes_authenc_cbc_aes_encrypt(struct aead_request *req)
@@ -2358,7 +2426,7 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
{
- alg->cra_flags = CRYPTO_ALG_ASYNC;
+ alg->cra_flags |= CRYPTO_ALG_ASYNC;
alg->cra_alignmask = 0xf;
alg->cra_priority = ATMEL_AES_PRIORITY;
alg->cra_module = THIS_MODULE;
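
Two patterns recur in this atmel-aes rework: the device lookup moves from request time into the tfm init hooks (which now fail with -ENODEV when no AES IP is present), and XTS requests that are not block-aligned are handed to a software fallback. The fallback wiring follows the crypto API's standard shape, roughly like this sketch (the context-struct names are hypothetical):

static int xts_init_with_fallback(struct crypto_skcipher *tfm)
{
	struct my_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	const char *name = crypto_tfm_alg_name(&tfm->base);	/* "xts(aes)" */

	/* Ask the crypto API for another implementation of the same algorithm. */
	ctx->fallback = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	/* Reserve room in the request context for the fallback's subrequest. */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct my_reqctx) +
				    crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}

Setting CRYPTO_ALG_NEED_FALLBACK in cra_flags, as the diff does for aes_xts_alg, keeps the allocator from handing back this same hardware implementation as its own fallback.
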
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 6f01c51e3c37..e30786ec9f2d 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -196,23 +196,15 @@ static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
atmel_tdes_write(dd, offset, *value);
}
-static struct atmel_tdes_dev *atmel_tdes_find_dev(struct atmel_tdes_ctx *ctx)
+static struct atmel_tdes_dev *atmel_tdes_dev_alloc(void)
{
- struct atmel_tdes_dev *tdes_dd = NULL;
- struct atmel_tdes_dev *tmp;
+ struct atmel_tdes_dev *tdes_dd;
spin_lock_bh(&atmel_tdes.lock);
- if (!ctx->dd) {
- list_for_each_entry(tmp, &atmel_tdes.dev_list, list) {
- tdes_dd = tmp;
- break;
- }
- ctx->dd = tdes_dd;
- } else {
- tdes_dd = ctx->dd;
- }
+ /* One TDES IP per SoC. */
+ tdes_dd = list_first_entry_or_null(&atmel_tdes.dev_list,
+ struct atmel_tdes_dev, list);
spin_unlock_bh(&atmel_tdes.lock);
-
return tdes_dd;
}
@@ -320,7 +312,7 @@ static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
dd->buf_out, dd->buflen, dd->dma_size, 1);
if (count != dd->dma_size) {
err = -EINVAL;
- pr_err("not all data converted: %zu\n", count);
+ dev_dbg(dd->dev, "not all data converted: %zu\n", count);
}
}
@@ -337,24 +329,24 @@ static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
dd->buflen &= ~(DES_BLOCK_SIZE - 1);
if (!dd->buf_in || !dd->buf_out) {
- dev_err(dd->dev, "unable to alloc pages.\n");
+ dev_dbg(dd->dev, "unable to alloc pages.\n");
goto err_alloc;
}
/* MAP here */
dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
dd->buflen, DMA_TO_DEVICE);
- if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
- dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
- err = -EINVAL;
+ err = dma_mapping_error(dd->dev, dd->dma_addr_in);
+ if (err) {
+ dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
goto err_map_in;
}
dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
dd->buflen, DMA_FROM_DEVICE);
- if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
- dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
- err = -EINVAL;
+ err = dma_mapping_error(dd->dev, dd->dma_addr_out);
+ if (err) {
+ dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
goto err_map_out;
}
@@ -367,8 +359,6 @@ err_map_in:
err_alloc:
free_page((unsigned long)dd->buf_out);
free_page((unsigned long)dd->buf_in);
- if (err)
- pr_err("error: %d\n", err);
return err;
}
@@ -520,14 +510,14 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
if (!err) {
- dev_err(dd->dev, "dma_map_sg() error\n");
+ dev_dbg(dd->dev, "dma_map_sg() error\n");
return -EINVAL;
}
err = dma_map_sg(dd->dev, dd->out_sg, 1,
DMA_FROM_DEVICE);
if (!err) {
- dev_err(dd->dev, "dma_map_sg() error\n");
+ dev_dbg(dd->dev, "dma_map_sg() error\n");
dma_unmap_sg(dd->dev, dd->in_sg, 1,
DMA_TO_DEVICE);
return -EINVAL;
@@ -646,7 +636,6 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
rctx->mode &= TDES_FLAGS_MODE_MASK;
dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
dd->ctx = ctx;
- ctx->dd = dd;
err = atmel_tdes_write_ctrl(dd);
if (!err)
@@ -679,7 +668,7 @@ static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
dd->buf_out, dd->buflen, dd->dma_size, 1);
if (count != dd->dma_size) {
err = -EINVAL;
- pr_err("not all data converted: %zu\n", count);
+ dev_dbg(dd->dev, "not all data converted: %zu\n", count);
}
}
}
@@ -691,11 +680,15 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
+ struct device *dev = ctx->dd->dev;
+
+ if (!req->cryptlen)
+ return 0;
switch (mode & TDES_FLAGS_OPMODE_MASK) {
case TDES_FLAGS_CFB8:
if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of CFB8 blocks\n");
+ dev_dbg(dev, "request size is not exact amount of CFB8 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB8_BLOCK_SIZE;
@@ -703,7 +696,7 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
case TDES_FLAGS_CFB16:
if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of CFB16 blocks\n");
+ dev_dbg(dev, "request size is not exact amount of CFB16 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB16_BLOCK_SIZE;
@@ -711,7 +704,7 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
case TDES_FLAGS_CFB32:
if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of CFB32 blocks\n");
+ dev_dbg(dev, "request size is not exact amount of CFB32 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB32_BLOCK_SIZE;
@@ -719,7 +712,7 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
default:
if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of DES blocks\n");
+ dev_dbg(dev, "request size is not exact amount of DES blocks\n");
return -EINVAL;
}
ctx->block_size = DES_BLOCK_SIZE;
@@ -897,14 +890,13 @@ static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
{
struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct atmel_tdes_dev *dd;
-
- crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));
- dd = atmel_tdes_find_dev(ctx);
- if (!dd)
+ ctx->dd = atmel_tdes_dev_alloc();
+ if (!ctx->dd)
return -ENODEV;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));
+
return 0;
}
@@ -999,7 +991,7 @@ static struct skcipher_alg tdes_algs[] = {
{
.base.cra_name = "ofb(des)",
.base.cra_driver_name = "atmel-ofb-des",
- .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_blocksize = 1,
.base.cra_alignmask = 0x7,
.min_keysize = DES_KEY_SIZE,
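
The buffer-mapping rework above leans on dma_mapping_error() returning a negative errno on failure (not a bare flag), so the result can be stored in err and returned as-is. In isolation the idiom looks like this sketch (dev, buf and len stand in for the driver's fields):

static int map_one_buffer(struct device *dev, void *buf, size_t len,
			  dma_addr_t *handle)
{
	int err;

	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	err = dma_mapping_error(dev, *handle);	/* 0 on success, negative errno on failure */
	if (err)
		dev_dbg(dev, "dma %zu bytes error\n", len);

	return err;
}
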
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 91808402e0bf..2ecb0e1f65d8 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -300,6 +300,9 @@ static int __sev_platform_shutdown_locked(int *error)
struct sev_device *sev = psp_master->sev_data;
int ret;
+ if (sev->state == SEV_STATE_UNINIT)
+ return 0;
+
ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
if (ret)
return ret;
@@ -1019,6 +1022,20 @@ e_err:
return ret;
}
+static void sev_firmware_shutdown(struct sev_device *sev)
+{
+ sev_platform_shutdown(NULL);
+
+ if (sev_es_tmr) {
+ /* The TMR area was encrypted, flush it from the cache */
+ wbinvd_on_all_cpus();
+
+ free_pages((unsigned long)sev_es_tmr,
+ get_order(SEV_ES_TMR_SIZE));
+ sev_es_tmr = NULL;
+ }
+}
+
void sev_dev_destroy(struct psp_device *psp)
{
struct sev_device *sev = psp->sev_data;
@@ -1026,6 +1043,8 @@ void sev_dev_destroy(struct psp_device *psp)
if (!sev)
return;
+ sev_firmware_shutdown(sev);
+
if (sev->misc)
kref_put(&misc_dev->refcount, sev_exit);
@@ -1056,21 +1075,6 @@ void sev_pci_init(void)
if (sev_get_api_version())
goto err;
- /*
- * If platform is not in UNINIT state then firmware upgrade and/or
- * platform INIT command will fail. These command require UNINIT state.
- *
- * In a normal boot we should never run into case where the firmware
- * is not in UNINIT state on boot. But in case of kexec boot, a reboot
- * may not go through a typical shutdown sequence and may leave the
- * firmware in INIT or WORKING state.
- */
-
- if (sev->state != SEV_STATE_UNINIT) {
- sev_platform_shutdown(NULL);
- sev->state = SEV_STATE_UNINIT;
- }
-
if (sev_version_greater_or_equal(0, 15) &&
sev_update_firmware(sev->dev) == 0)
sev_get_api_version();
@@ -1115,17 +1119,10 @@ err:
void sev_pci_exit(void)
{
- if (!psp_master->sev_data)
- return;
-
- sev_platform_shutdown(NULL);
+ struct sev_device *sev = psp_master->sev_data;
- if (sev_es_tmr) {
- /* The TMR area was encrypted, flush it from the cache */
- wbinvd_on_all_cpus();
+ if (!sev)
+ return;
- free_pages((unsigned long)sev_es_tmr,
- get_order(SEV_ES_TMR_SIZE));
- sev_es_tmr = NULL;
- }
+ sev_firmware_shutdown(sev);
}
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 6fb6ba35f89d..88c672ad27e4 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -241,6 +241,17 @@ e_err:
return ret;
}
+static void sp_pci_shutdown(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_device *sp = dev_get_drvdata(dev);
+
+ if (!sp)
+ return;
+
+ sp_destroy(sp);
+}
+
static void sp_pci_remove(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
@@ -351,6 +362,12 @@ static const struct sp_dev_vdata dev_vdata[] = {
.psp_vdata = &pspv3,
#endif
},
+ { /* 5 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv2,
+#endif
+ },
};
static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
@@ -359,6 +376,7 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
{ PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
+ { PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
/* Last entry must be zero */
{ 0, }
};
@@ -371,6 +389,7 @@ static struct pci_driver sp_pci_driver = {
.id_table = sp_pci_table,
.probe = sp_pci_probe,
.remove = sp_pci_remove,
+ .shutdown = sp_pci_shutdown,
.driver.pm = &sp_pci_pm_ops,
};
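
The new .shutdown hook matters mostly for kexec: a rebooting kernel does not call .remove, so .shutdown is the only chance to quiesce the PSP before the next kernel finds it mid-operation. The general form of the wiring, with hypothetical names:

static void my_shutdown(struct pci_dev *pdev)
{
	struct my_state *s = pci_get_drvdata(pdev);

	if (s)
		my_quiesce(s);	/* hypothetical helper: stop DMA and interrupts */
}

static struct pci_driver my_driver = {
	.name		= "my-dev",
	.id_table	= my_ids,
	.probe		= my_probe,
	.remove		= my_remove,
	.shutdown	= my_shutdown,	/* reboot and kexec path */
};
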
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 8b0640fb04be..65a641396c07 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/topology.h>
#include <linux/uacce.h>
#include "hpre.h"
@@ -81,6 +82,16 @@
#define HPRE_PREFETCH_DISABLE BIT(30)
#define HPRE_SVA_DISABLE_READY (BIT(4) | BIT(8))
+/* clock gate */
+#define HPRE_CLKGATE_CTL 0x301a10
+#define HPRE_PEH_CFG_AUTO_GATE 0x301a2c
+#define HPRE_CLUSTER_DYN_CTL 0x302010
+#define HPRE_CORE_SHB_CFG 0x302088
+#define HPRE_CLKGATE_CTL_EN BIT(0)
+#define HPRE_PEH_CFG_AUTO_GATE_EN BIT(0)
+#define HPRE_CLUSTER_DYN_CTL_EN BIT(0)
+#define HPRE_CORE_GATE_EN (BIT(30) | BIT(31))
+
#define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044
#define HPRE_AM_OOO_SHUTDOWN_ENABLE BIT(0)
#define HPRE_WR_MSI_PORT BIT(2)
@@ -417,12 +428,63 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm)
pci_err(qm->pdev, "failed to close sva prefetch\n");
}
+static void hpre_enable_clock_gate(struct hisi_qm *qm)
+{
+ u32 val;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ val = readl(qm->io_base + HPRE_CLKGATE_CTL);
+ val |= HPRE_CLKGATE_CTL_EN;
+ writel(val, qm->io_base + HPRE_CLKGATE_CTL);
+
+ val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
+ val |= HPRE_PEH_CFG_AUTO_GATE_EN;
+ writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
+
+ val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
+ val |= HPRE_CLUSTER_DYN_CTL_EN;
+ writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
+
+ val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
+ val |= HPRE_CORE_GATE_EN;
+ writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
+}
+
+static void hpre_disable_clock_gate(struct hisi_qm *qm)
+{
+ u32 val;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ val = readl(qm->io_base + HPRE_CLKGATE_CTL);
+ val &= ~HPRE_CLKGATE_CTL_EN;
+ writel(val, qm->io_base + HPRE_CLKGATE_CTL);
+
+ val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
+ val &= ~HPRE_PEH_CFG_AUTO_GATE_EN;
+ writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
+
+ val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
+ val &= ~HPRE_CLUSTER_DYN_CTL_EN;
+ writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
+
+ val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
+ val &= ~HPRE_CORE_GATE_EN;
+ writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
+}
+
static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
u32 val;
int ret;
+ /* disable dynamic clock gating before SRAM init */
+ hpre_disable_clock_gate(qm);
+
writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);
@@ -473,6 +535,8 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
/* Config data buffer pasid needed by Kunpeng 920 */
hpre_config_pasid(qm);
+ hpre_enable_clock_gate(qm);
+
return ret;
}
@@ -595,10 +659,15 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
size_t count, loff_t *pos)
{
struct hpre_debugfs_file *file = filp->private_data;
+ struct hisi_qm *qm = hpre_file_to_qm(file);
char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
u32 val;
int ret;
+ ret = hisi_qm_get_dfx_access(qm);
+ if (ret)
+ return ret;
+
spin_lock_irq(&file->lock);
switch (file->type) {
case HPRE_CLEAR_ENABLE:
@@ -608,18 +677,25 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
val = hpre_cluster_inqry_read(file);
break;
default:
- spin_unlock_irq(&file->lock);
- return -EINVAL;
+ goto err_input;
}
spin_unlock_irq(&file->lock);
+
+ hisi_qm_put_dfx_access(qm);
ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+
+err_input:
+ spin_unlock_irq(&file->lock);
+ hisi_qm_put_dfx_access(qm);
+ return -EINVAL;
}
static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct hpre_debugfs_file *file = filp->private_data;
+ struct hisi_qm *qm = hpre_file_to_qm(file);
char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
unsigned long val;
int len, ret;
@@ -639,6 +715,10 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
if (kstrtoul(tbuf, 0, &val))
return -EFAULT;
+ ret = hisi_qm_get_dfx_access(qm);
+ if (ret)
+ return ret;
+
spin_lock_irq(&file->lock);
switch (file->type) {
case HPRE_CLEAR_ENABLE:
@@ -655,12 +735,12 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
ret = -EINVAL;
goto err_input;
}
- spin_unlock_irq(&file->lock);
- return count;
+ ret = count;
err_input:
spin_unlock_irq(&file->lock);
+ hisi_qm_put_dfx_access(qm);
return ret;
}
@@ -700,6 +780,24 @@ static int hpre_debugfs_atomic64_set(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
hpre_debugfs_atomic64_set, "%llu\n");
+static int hpre_com_regs_show(struct seq_file *s, void *unused)
+{
+ hisi_qm_regs_dump(s, s->private);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hpre_com_regs);
+
+static int hpre_cluster_regs_show(struct seq_file *s, void *unused)
+{
+ hisi_qm_regs_dump(s, s->private);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hpre_cluster_regs);
+
static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
@@ -737,8 +835,11 @@ static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
regset->regs = hpre_com_dfx_regs;
regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
regset->base = qm->io_base;
+ regset->dev = dev;
+
+ debugfs_create_file("regs", 0444, qm->debug.debug_root,
+ regset, &hpre_com_regs_fops);
- debugfs_create_regset32("regs", 0444, qm->debug.debug_root, regset);
return 0;
}
@@ -764,8 +865,10 @@ static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
regset->regs = hpre_cluster_dfx_regs;
regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
regset->base = qm->io_base + hpre_cluster_offsets[i];
+ regset->dev = dev;
- debugfs_create_regset32("regs", 0444, tmp_d, regset);
+ debugfs_create_file("regs", 0444, tmp_d, regset,
+ &hpre_cluster_regs_fops);
ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL,
i + HPRE_CLUSTER_CTRL);
if (ret)
@@ -1017,6 +1120,8 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_with_alg_register;
}
+ hisi_qm_pm_init(qm);
+
return 0;
err_with_alg_register:
@@ -1040,6 +1145,7 @@ static void hpre_remove(struct pci_dev *pdev)
struct hisi_qm *qm = pci_get_drvdata(pdev);
int ret;
+ hisi_qm_pm_uninit(qm);
hisi_qm_wait_task_finish(qm, &hpre_devices);
hisi_qm_alg_unregister(qm, &hpre_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
@@ -1062,6 +1168,10 @@ static void hpre_remove(struct pci_dev *pdev)
hisi_qm_uninit(qm);
}
+static const struct dev_pm_ops hpre_pm_ops = {
+ SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
+};
+
static const struct pci_error_handlers hpre_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
.slot_reset = hisi_qm_dev_slot_reset,
@@ -1078,6 +1188,7 @@ static struct pci_driver hpre_pci_driver = {
hisi_qm_sriov_configure : NULL,
.err_handler = &hpre_err_handler,
.shutdown = hisi_qm_dev_shutdown,
+ .driver.pm = &hpre_pm_ops,
};
static void hpre_register_debugfs(void)
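
All of the hpre debugfs changes above follow one rule: code about to touch device registers first takes a runtime-PM reference through hisi_qm_get_dfx_access() and releases it on every exit path, errors included. Stripped of the HPRE specifics, the guard pattern is roughly:

static ssize_t guarded_dbg_read(struct hisi_qm *qm, char __user *buf,
				size_t count, loff_t *pos)
{
	char tbuf[32];
	u32 val;
	int ret;

	ret = hisi_qm_get_dfx_access(qm);	/* -EAGAIN while suspended */
	if (ret)
		return ret;

	val = readl(qm->io_base + 0x100);	/* placeholder register offset */
	hisi_qm_put_dfx_access(qm);		/* drop the PM reference */

	ret = scnprintf(tbuf, sizeof(tbuf), "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
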
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 1d67f94a1d56..369562d34d66 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -4,12 +4,12 @@
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
-#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/log2.h>
+#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uacce.h>
@@ -270,6 +270,8 @@
#define QM_QOS_MAX_CIR_S 11
#define QM_QOS_VAL_MAX_LEN 32
+#define QM_AUTOSUSPEND_DELAY 3000
+
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
@@ -734,6 +736,34 @@ static u32 qm_get_irq_num_v3(struct hisi_qm *qm)
return QM_IRQ_NUM_VF_V3;
}
+static int qm_pm_get_sync(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ int ret;
+
+ if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ return 0;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to get_sync(%d).\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qm_pm_put_sync(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+
+ if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ return;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+
static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
{
u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
@@ -1173,16 +1203,13 @@ static struct hisi_qm *file_to_qm(struct debugfs_file *file)
return container_of(debug, struct hisi_qm, debug);
}
-static u32 current_q_read(struct debugfs_file *file)
+static u32 current_q_read(struct hisi_qm *qm)
{
- struct hisi_qm *qm = file_to_qm(file);
-
return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
}
-static int current_q_write(struct debugfs_file *file, u32 val)
+static int current_q_write(struct hisi_qm *qm, u32 val)
{
- struct hisi_qm *qm = file_to_qm(file);
u32 tmp;
if (val >= qm->debug.curr_qm_qp_num)
@@ -1199,18 +1226,14 @@ static int current_q_write(struct debugfs_file *file, u32 val)
return 0;
}
-static u32 clear_enable_read(struct debugfs_file *file)
+static u32 clear_enable_read(struct hisi_qm *qm)
{
- struct hisi_qm *qm = file_to_qm(file);
-
return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
}
/* rd_clr_ctrl 1 enable read clear, otherwise 0 disable it */
-static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
+static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl)
{
- struct hisi_qm *qm = file_to_qm(file);
-
if (rd_clr_ctrl > 1)
return -EINVAL;
@@ -1219,16 +1242,13 @@ static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
return 0;
}
-static u32 current_qm_read(struct debugfs_file *file)
+static u32 current_qm_read(struct hisi_qm *qm)
{
- struct hisi_qm *qm = file_to_qm(file);
-
return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}
-static int current_qm_write(struct debugfs_file *file, u32 val)
+static int current_qm_write(struct hisi_qm *qm, u32 val)
{
- struct hisi_qm *qm = file_to_qm(file);
u32 tmp;
if (val > qm->vfs_num)
@@ -1259,29 +1279,39 @@ static ssize_t qm_debug_read(struct file *filp, char __user *buf,
{
struct debugfs_file *file = filp->private_data;
enum qm_debug_file index = file->index;
+ struct hisi_qm *qm = file_to_qm(file);
char tbuf[QM_DBG_TMP_BUF_LEN];
u32 val;
int ret;
+ ret = hisi_qm_get_dfx_access(qm);
+ if (ret)
+ return ret;
+
mutex_lock(&file->lock);
switch (index) {
case CURRENT_QM:
- val = current_qm_read(file);
+ val = current_qm_read(qm);
break;
case CURRENT_Q:
- val = current_q_read(file);
+ val = current_q_read(qm);
break;
case CLEAR_ENABLE:
- val = clear_enable_read(file);
+ val = clear_enable_read(qm);
break;
default:
- mutex_unlock(&file->lock);
- return -EINVAL;
+ goto err_input;
}
mutex_unlock(&file->lock);
+ hisi_qm_put_dfx_access(qm);
ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+
+err_input:
+ mutex_unlock(&file->lock);
+ hisi_qm_put_dfx_access(qm);
+ return -EINVAL;
}
static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
@@ -1289,6 +1319,7 @@ static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
{
struct debugfs_file *file = filp->private_data;
enum qm_debug_file index = file->index;
+ struct hisi_qm *qm = file_to_qm(file);
unsigned long val;
char tbuf[QM_DBG_TMP_BUF_LEN];
int len, ret;
@@ -1308,22 +1339,28 @@ static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
if (kstrtoul(tbuf, 0, &val))
return -EFAULT;
+ ret = hisi_qm_get_dfx_access(qm);
+ if (ret)
+ return ret;
+
mutex_lock(&file->lock);
switch (index) {
case CURRENT_QM:
- ret = current_qm_write(file, val);
+ ret = current_qm_write(qm, val);
break;
case CURRENT_Q:
- ret = current_q_write(file, val);
+ ret = current_q_write(qm, val);
break;
case CLEAR_ENABLE:
- ret = clear_enable_write(file, val);
+ ret = clear_enable_write(qm, val);
break;
default:
ret = -EINVAL;
}
mutex_unlock(&file->lock);
+ hisi_qm_put_dfx_access(qm);
+
if (ret)
return ret;
@@ -1337,13 +1374,8 @@ static const struct file_operations qm_debug_fops = {
.write = qm_debug_write,
};
-struct qm_dfx_registers {
- char *reg_name;
- u64 reg_offset;
-};
-
#define CNT_CYC_REGS_NUM 10
-static struct qm_dfx_registers qm_dfx_regs[] = {
+static const struct debugfs_reg32 qm_dfx_regs[] = {
/* XXX_CNT are reading clear register */
{"QM_ECC_1BIT_CNT ", 0x104000ull},
{"QM_ECC_MBIT_CNT ", 0x104008ull},
@@ -1369,31 +1401,59 @@ static struct qm_dfx_registers qm_dfx_regs[] = {
{"QM_DFX_FF_ST5 ", 0x1040dcull},
{"QM_DFX_FF_ST6 ", 0x1040e0ull},
{"QM_IN_IDLE_ST ", 0x1040e4ull},
- { NULL, 0}
};
-static struct qm_dfx_registers qm_vf_dfx_regs[] = {
+static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
{"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
- { NULL, 0}
};
-static int qm_regs_show(struct seq_file *s, void *unused)
+/**
+ * hisi_qm_regs_dump() - Dump registers' values.
+ * @s: debugfs file handle.
+ * @regset: accelerator registers information.
+ *
+ * Dump accelerator registers.
+ */
+void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset)
{
- struct hisi_qm *qm = s->private;
- struct qm_dfx_registers *regs;
+ struct pci_dev *pdev = to_pci_dev(regset->dev);
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ const struct debugfs_reg32 *regs = regset->regs;
+ int regs_len = regset->nregs;
+ int i, ret;
u32 val;
- if (qm->fun_type == QM_HW_PF)
- regs = qm_dfx_regs;
- else
- regs = qm_vf_dfx_regs;
+ ret = hisi_qm_get_dfx_access(qm);
+ if (ret)
+ return;
- while (regs->reg_name) {
- val = readl(qm->io_base + regs->reg_offset);
- seq_printf(s, "%s= 0x%08x\n", regs->reg_name, val);
- regs++;
+ for (i = 0; i < regs_len; i++) {
+ val = readl(regset->base + regs[i].offset);
+ seq_printf(s, "%s= 0x%08x\n", regs[i].name, val);
}
+ hisi_qm_put_dfx_access(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_regs_dump);
+
+static int qm_regs_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+ struct debugfs_regset32 regset;
+
+ if (qm->fun_type == QM_HW_PF) {
+ regset.regs = qm_dfx_regs;
+ regset.nregs = ARRAY_SIZE(qm_dfx_regs);
+ } else {
+ regset.regs = qm_vf_dfx_regs;
+ regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs);
+ }
+
+ regset.base = qm->io_base;
+ regset.dev = &qm->pdev->dev;
+
+ hisi_qm_regs_dump(s, &regset);
+
return 0;
}
@@ -1823,16 +1883,24 @@ static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
if (*pos)
return 0;
+ ret = hisi_qm_get_dfx_access(qm);
+ if (ret)
+ return ret;
+
/* Judge if the instance is being reset. */
if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
return 0;
- if (count > QM_DBG_WRITE_LEN)
- return -ENOSPC;
+ if (count > QM_DBG_WRITE_LEN) {
+ ret = -ENOSPC;
+ goto put_dfx_access;
+ }
cmd_buf = memdup_user_nul(buffer, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
+ if (IS_ERR(cmd_buf)) {
+ ret = PTR_ERR(cmd_buf);
+ goto put_dfx_access;
+ }
cmd_buf_tmp = strchr(cmd_buf, '\n');
if (cmd_buf_tmp) {
@@ -1843,12 +1911,16 @@ static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
ret = qm_cmd_write_dump(qm, cmd_buf);
if (ret) {
kfree(cmd_buf);
- return ret;
+ goto put_dfx_access;
}
kfree(cmd_buf);
- return count;
+ ret = count;
+
+put_dfx_access:
+ hisi_qm_put_dfx_access(qm);
+ return ret;
}
static const struct file_operations qm_cmd_fops = {
@@ -2445,11 +2517,19 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
{
struct hisi_qp *qp;
+ int ret;
+
+ ret = qm_pm_get_sync(qm);
+ if (ret)
+ return ERR_PTR(ret);
down_write(&qm->qps_lock);
qp = qm_create_qp_nolock(qm, alg_type);
up_write(&qm->qps_lock);
+ if (IS_ERR(qp))
+ qm_pm_put_sync(qm);
+
return qp;
}
EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
@@ -2475,6 +2555,8 @@ void hisi_qm_release_qp(struct hisi_qp *qp)
idr_remove(&qm->qp_idr, qp->qp_id);
up_write(&qm->qps_lock);
+
+ qm_pm_put_sync(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
@@ -3200,6 +3282,10 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
init_rwsem(&qm->qps_lock);
qm->qp_in_used = 0;
qm->misc_ctl = false;
+ if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V2) {
+ if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
+ dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
+ }
}
static void qm_cmd_uninit(struct hisi_qm *qm)
@@ -4057,10 +4143,15 @@ static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
u32 qos_val, ir;
int ret;
+ ret = hisi_qm_get_dfx_access(qm);
+ if (ret)
+ return ret;
+
/* Mailbox and reset cannot be operated at the same time */
if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
pci_err(qm->pdev, "dev resetting, read alg qos failed!\n");
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto err_put_dfx_access;
}
if (qm->fun_type == QM_HW_PF) {
@@ -4079,6 +4170,8 @@ static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
err_get_status:
clear_bit(QM_RESETTING, &qm->misc_ctl);
+err_put_dfx_access:
+ hisi_qm_put_dfx_access(qm);
return ret;
}
@@ -4159,15 +4252,23 @@ static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
fun_index = device * 8 + function;
+ ret = qm_pm_get_sync(qm);
+ if (ret) {
+ ret = -EINVAL;
+ goto err_get_status;
+ }
+
ret = qm_func_shaper_enable(qm, fun_index, val);
if (ret) {
pci_err(qm->pdev, "failed to enable function shaper!\n");
ret = -EINVAL;
- goto err_get_status;
+ goto err_put_sync;
}
- ret = count;
+ ret = count;
+err_put_sync:
+ qm_pm_put_sync(qm);
err_get_status:
clear_bit(QM_RESETTING, &qm->misc_ctl);
return ret;
@@ -4245,7 +4346,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
*/
void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
{
- struct qm_dfx_registers *regs;
+ const struct debugfs_reg32 *regs;
int i;
/* clear current_qm */
@@ -4264,7 +4365,7 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
regs = qm_dfx_regs;
for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
- readl(qm->io_base + regs->reg_offset);
+ readl(qm->io_base + regs->offset);
regs++;
}
@@ -4287,19 +4388,23 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
struct hisi_qm *qm = pci_get_drvdata(pdev);
int pre_existing_vfs, num_vfs, total_vfs, ret;
+ ret = qm_pm_get_sync(qm);
+ if (ret)
+ return ret;
+
total_vfs = pci_sriov_get_totalvfs(pdev);
pre_existing_vfs = pci_num_vf(pdev);
if (pre_existing_vfs) {
pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
pre_existing_vfs);
- return 0;
+ goto err_put_sync;
}
num_vfs = min_t(int, max_vfs, total_vfs);
ret = qm_vf_q_assign(qm, num_vfs);
if (ret) {
pci_err(pdev, "Can't assign queues for VF!\n");
- return ret;
+ goto err_put_sync;
}
qm->vfs_num = num_vfs;
@@ -4308,12 +4413,16 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
if (ret) {
pci_err(pdev, "Can't enable VF!\n");
qm_clear_vft_config(qm);
- return ret;
+ goto err_put_sync;
}
pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
return num_vfs;
+
+err_put_sync:
+ qm_pm_put_sync(qm);
+ return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
@@ -4328,6 +4437,7 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
int total_vfs = pci_sriov_get_totalvfs(qm->pdev);
+ int ret;
if (pci_vfs_assigned(pdev)) {
pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
@@ -4343,8 +4453,13 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
pci_disable_sriov(pdev);
/* clear vf function shaper configure array */
memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs);
+ ret = qm_clear_vft_config(qm);
+ if (ret)
+ return ret;
- return qm_clear_vft_config(qm);
+ qm_pm_put_sync(qm);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
@@ -5164,11 +5279,18 @@ static void hisi_qm_controller_reset(struct work_struct *rst_work)
struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
int ret;
+ ret = qm_pm_get_sync(qm);
+ if (ret) {
+ clear_bit(QM_RST_SCHED, &qm->misc_ctl);
+ return;
+ }
+
/* reset pcie device controller */
ret = qm_controller_reset(qm);
if (ret)
dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);
+ qm_pm_put_sync(qm);
}
static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
@@ -5680,6 +5802,194 @@ err_pci_init:
}
EXPORT_SYMBOL_GPL(hisi_qm_init);
+/**
+ * hisi_qm_get_dfx_access() - Try to get dfx access.
+ * @qm: pointer to accelerator device.
+ *
+ * Try to get dfx access, so the caller can read debug state.
+ *
+ * If the device is suspended, return failure; otherwise
+ * bump up the runtime PM usage counter.
+ */
+int hisi_qm_get_dfx_access(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+
+ if (pm_runtime_suspended(dev)) {
+ dev_info(dev, "can not read/write - device in suspended.\n");
+ return -EAGAIN;
+ }
+
+ return qm_pm_get_sync(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access);
+
+/**
+ * hisi_qm_put_dfx_access() - Put dfx access.
+ * @qm: pointer to accelerator device.
+ *
+ * Put dfx access, drop runtime PM usage counter.
+ */
+void hisi_qm_put_dfx_access(struct hisi_qm *qm)
+{
+ qm_pm_put_sync(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access);
+
+/**
+ * hisi_qm_pm_init() - Initialize qm runtime PM.
+ * @qm: pointer to accelerator device.
+ *
+ * Function that initializes qm runtime PM.
+ */
+void hisi_qm_pm_init(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+
+ if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ return;
+
+ pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_put_noidle(dev);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_pm_init);
+
+/**
+ * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
+ * @qm: pointer to accelerator device.
+ *
+ * Function that uninitializes qm runtime PM.
+ */
+void hisi_qm_pm_uninit(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+
+ if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ return;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit);
+
+static int qm_prepare_for_suspend(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+ u32 val;
+
+ ret = qm->ops->set_msi(qm, false);
+ if (ret) {
+ pci_err(pdev, "failed to disable MSI before suspending!\n");
+ return ret;
+ }
+
+ /* shutdown OOO register */
+ writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
+ qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
+ val,
+ (val == ACC_MASTER_TRANS_RETURN_RW),
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+ pci_emerg(pdev, "Bus lock! Please reset system.\n");
+ return ret;
+ }
+
+ ret = qm_set_pf_mse(qm, false);
+ if (ret)
+ pci_err(pdev, "failed to disable MSE before suspending!\n");
+
+ return ret;
+}
+
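The readl_relaxed_poll_timeout() above hides a sleeping poll loop. Roughly, as a simplified sketch (the real macro in <linux/iopoll.h> tracks time with ktime and performs one final read after the deadline):

/* approximate expansion of the poll in qm_prepare_for_suspend() */
s64 left = POLL_TIMEOUT;	/* microseconds remaining */

for (;;) {
	val = readl_relaxed(qm->io_base + ACC_MASTER_TRANS_RETURN);
	if (val == ACC_MASTER_TRANS_RETURN_RW) {
		ret = 0;		/* condition met */
		break;
	}
	if (left <= 0) {
		ret = -ETIMEDOUT;	/* deadline expired */
		break;
	}
	usleep_range(POLL_PERIOD, POLL_PERIOD * 2);
	left -= POLL_PERIOD;
}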
+static int qm_rebuild_for_resume(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ ret = qm_set_pf_mse(qm, true);
+ if (ret) {
+ pci_err(pdev, "failed to enable MSE after resuming!\n");
+ return ret;
+ }
+
+ ret = qm->ops->set_msi(qm, true);
+ if (ret) {
+ pci_err(pdev, "failed to enable MSI after resuming!\n");
+ return ret;
+ }
+
+ ret = qm_dev_hw_init(qm);
+ if (ret) {
+ pci_err(pdev, "failed to init device after resuming\n");
+ return ret;
+ }
+
+ qm_cmd_init(qm);
+ hisi_qm_dev_err_init(qm);
+
+ return 0;
+}
+
+/**
+ * hisi_qm_suspend() - Runtime suspend of given device.
+ * @dev: device to suspend.
+ *
+ * Function that suspends the device.
+ */
+int hisi_qm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
+
+ pci_info(pdev, "entering suspended state\n");
+
+ ret = hisi_qm_stop(qm, QM_NORMAL);
+ if (ret) {
+ pci_err(pdev, "failed to stop qm(%d)\n", ret);
+ return ret;
+ }
+
+ ret = qm_prepare_for_suspend(qm);
+ if (ret)
+ pci_err(pdev, "failed to prepare suspended(%d)\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_suspend);
+
+/**
+ * hisi_qm_resume() - Runtime resume of given device.
+ * @dev: device to resume.
+ *
+ * Function that resumes the device.
+ */
+int hisi_qm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
+
+ pci_info(pdev, "resuming from suspend state\n");
+
+ ret = qm_rebuild_for_resume(qm);
+ if (ret) {
+ pci_err(pdev, "failed to rebuild resume(%d)\n", ret);
+ return ret;
+ }
+
+ ret = hisi_qm_start(qm);
+ if (ret)
+ pci_err(pdev, "failed to start qm(%d)\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_resume);
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 035eaf8c442d..3068093229a5 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -4,6 +4,7 @@
#define HISI_ACC_QM_H
#include <linux/bitfield.h>
+#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -430,4 +431,11 @@ void hisi_qm_dev_shutdown(struct pci_dev *pdev);
void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
+int hisi_qm_resume(struct device *dev);
+int hisi_qm_suspend(struct device *dev);
+void hisi_qm_pm_uninit(struct hisi_qm *qm);
+void hisi_qm_pm_init(struct hisi_qm *qm);
+int hisi_qm_get_dfx_access(struct hisi_qm *qm);
+void hisi_qm_put_dfx_access(struct hisi_qm *qm);
+void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 018415b9840a..d97cf02b1df7 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -157,11 +157,6 @@ struct sec_ctx {
struct device *dev;
};
-enum sec_endian {
- SEC_LE = 0,
- SEC_32BE,
- SEC_64BE
-};
enum sec_debug_file_index {
SEC_CLEAR_ENABLE,
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 490db7bccf61..90551bf38b52 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>
@@ -57,10 +58,16 @@
#define SEC_MEM_START_INIT_REG 0x301100
#define SEC_MEM_INIT_DONE_REG 0x301104
+/* clock gating */
#define SEC_CONTROL_REG 0x301200
-#define SEC_TRNG_EN_SHIFT 8
+#define SEC_DYNAMIC_GATE_REG 0x30121c
+#define SEC_CORE_AUTO_GATE 0x30212c
+#define SEC_DYNAMIC_GATE_EN 0x7bff
+#define SEC_CORE_AUTO_GATE_EN GENMASK(3, 0)
#define SEC_CLK_GATE_ENABLE BIT(3)
#define SEC_CLK_GATE_DISABLE (~BIT(3))
+
+#define SEC_TRNG_EN_SHIFT 8
#define SEC_AXI_SHUTDOWN_ENABLE BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF
@@ -312,31 +319,20 @@ static const struct pci_device_id sec_dev_ids[] = {
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);
-static u8 sec_get_endian(struct hisi_qm *qm)
+static void sec_set_endian(struct hisi_qm *qm)
{
u32 reg;
- /*
- * As for VF, it is a wrong way to get endian setting by
- * reading a register of the engine
- */
- if (qm->pdev->is_virtfn) {
- dev_err_ratelimited(&qm->pdev->dev,
- "cannot access a register in VF!\n");
- return SEC_LE;
- }
reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
- /* BD little endian mode */
- if (!(reg & BIT(0)))
- return SEC_LE;
+ reg &= ~(BIT(1) | BIT(0));
+ if (!IS_ENABLED(CONFIG_64BIT))
+ reg |= BIT(1);
- /* BD 32-bits big endian mode */
- else if (!(reg & BIT(1)))
- return SEC_32BE;
- /* BD 64-bits big endian mode */
- else
- return SEC_64BE;
+ if (!IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
+ reg |= BIT(0);
+
+ writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
}
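Because IS_ENABLED() folds to a compile-time constant, sec_set_endian() compiles down per configuration; on a 64-bit little-endian kernel it reduces to the following (sketch):

	reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	reg &= ~(BIT(1) | BIT(0));	/* both IS_ENABLED() tests compile out */
	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);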
static void sec_open_sva_prefetch(struct hisi_qm *qm)
@@ -378,15 +374,43 @@ static void sec_close_sva_prefetch(struct hisi_qm *qm)
pci_err(qm->pdev, "failed to close sva prefetch\n");
}
+static void sec_enable_clock_gate(struct hisi_qm *qm)
+{
+ u32 val;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
+ val |= SEC_CLK_GATE_ENABLE;
+ writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);
+
+ val = readl(qm->io_base + SEC_DYNAMIC_GATE_REG);
+ val |= SEC_DYNAMIC_GATE_EN;
+ writel(val, qm->io_base + SEC_DYNAMIC_GATE_REG);
+
+ val = readl(qm->io_base + SEC_CORE_AUTO_GATE);
+ val |= SEC_CORE_AUTO_GATE_EN;
+ writel(val, qm->io_base + SEC_CORE_AUTO_GATE);
+}
+
+static void sec_disable_clock_gate(struct hisi_qm *qm)
+{
+ u32 val;
+
+ /* Kunpeng920 needs clock gating to be disabled */
+ val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
+ val &= SEC_CLK_GATE_DISABLE;
+ writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);
+}
+
static int sec_engine_init(struct hisi_qm *qm)
{
int ret;
u32 reg;
- /* disable clock gate control */
- reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
- reg &= SEC_CLK_GATE_DISABLE;
- writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
+ /* disable clock gate control before mem init */
+ sec_disable_clock_gate(qm);
writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG);
@@ -429,9 +453,9 @@ static int sec_engine_init(struct hisi_qm *qm)
qm->io_base + SEC_BD_ERR_CHK_EN_REG3);
/* config endian */
- reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
- reg |= sec_get_endian(qm);
- writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
+ sec_set_endian(qm);
+
+ sec_enable_clock_gate(qm);
return 0;
}
@@ -533,17 +557,14 @@ static void sec_hw_error_disable(struct hisi_qm *qm)
writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
}
-static u32 sec_clear_enable_read(struct sec_debug_file *file)
+static u32 sec_clear_enable_read(struct hisi_qm *qm)
{
- struct hisi_qm *qm = file->qm;
-
return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
SEC_CTRL_CNT_CLR_CE_BIT;
}
-static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
+static int sec_clear_enable_write(struct hisi_qm *qm, u32 val)
{
- struct hisi_qm *qm = file->qm;
u32 tmp;
if (val != 1 && val)
@@ -561,24 +582,34 @@ static ssize_t sec_debug_read(struct file *filp, char __user *buf,
{
struct sec_debug_file *file = filp->private_data;
char tbuf[SEC_DBGFS_VAL_MAX_LEN];
+ struct hisi_qm *qm = file->qm;
u32 val;
int ret;
+ ret = hisi_qm_get_dfx_access(qm);
+ if (ret)
+ return ret;
+
spin_lock_irq(&file->lock);
switch (file->index) {
case SEC_CLEAR_ENABLE:
- val = sec_clear_enable_read(file);
+ val = sec_clear_enable_read(qm);
break;
default:
- spin_unlock_irq(&file->lock);
- return -EINVAL;
+ goto err_input;
}
spin_unlock_irq(&file->lock);
- ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
+ hisi_qm_put_dfx_access(qm);
+ ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+
+err_input:
+ spin_unlock_irq(&file->lock);
+ hisi_qm_put_dfx_access(qm);
+ return -EINVAL;
}
static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
@@ -586,6 +617,7 @@ static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
{
struct sec_debug_file *file = filp->private_data;
char tbuf[SEC_DBGFS_VAL_MAX_LEN];
+ struct hisi_qm *qm = file->qm;
unsigned long val;
int len, ret;
@@ -604,11 +636,15 @@ static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
if (kstrtoul(tbuf, 0, &val))
return -EFAULT;
+ ret = hisi_qm_get_dfx_access(qm);
+ if (ret)
+ return ret;
+
spin_lock_irq(&file->lock);
switch (file->index) {
case SEC_CLEAR_ENABLE:
- ret = sec_clear_enable_write(file, val);
+ ret = sec_clear_enable_write(qm, val);
if (ret)
goto err_input;
break;
@@ -617,12 +653,11 @@ static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
goto err_input;
}
- spin_unlock_irq(&file->lock);
-
- return count;
+ ret = count;
err_input:
spin_unlock_irq(&file->lock);
+ hisi_qm_put_dfx_access(qm);
return ret;
}
@@ -653,6 +688,15 @@ static int sec_debugfs_atomic64_set(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
sec_debugfs_atomic64_set, "%lld\n");
+static int sec_regs_show(struct seq_file *s, void *unused)
+{
+ hisi_qm_regs_dump(s, s->private);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(sec_regs);
+
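DEFINE_SHOW_ATTRIBUTE(sec_regs) generates the sec_regs_fops used below by wrapping sec_regs_show() in the single-record seq_file helpers; roughly (simplified expansion of the macro from <linux/seq_file.h>):

static int sec_regs_open(struct inode *inode, struct file *file)
{
	/* inode->i_private becomes s->private in sec_regs_show() */
	return single_open(file, sec_regs_show, inode->i_private);
}

static const struct file_operations sec_regs_fops = {
	.owner   = THIS_MODULE,
	.open    = sec_regs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};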
static int sec_core_debug_init(struct hisi_qm *qm)
{
struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
@@ -671,9 +715,10 @@ static int sec_core_debug_init(struct hisi_qm *qm)
regset->regs = sec_dfx_regs;
regset->nregs = ARRAY_SIZE(sec_dfx_regs);
regset->base = qm->io_base;
+ regset->dev = dev;
if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID)
- debugfs_create_regset32("regs", 0444, tmp_d, regset);
+ debugfs_create_file("regs", 0444, tmp_d, regset, &sec_regs_fops);
for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
@@ -981,10 +1026,13 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_alg_unregister;
}
+ hisi_qm_pm_init(qm);
+
return 0;
err_alg_unregister:
- hisi_qm_alg_unregister(qm, &sec_devices);
+ if (qm->qp_num >= ctx_q_num)
+ hisi_qm_alg_unregister(qm, &sec_devices);
err_qm_stop:
sec_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
@@ -999,6 +1047,7 @@ static void sec_remove(struct pci_dev *pdev)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
+ hisi_qm_pm_uninit(qm);
hisi_qm_wait_task_finish(qm, &sec_devices);
if (qm->qp_num >= ctx_q_num)
hisi_qm_alg_unregister(qm, &sec_devices);
@@ -1018,6 +1067,10 @@ static void sec_remove(struct pci_dev *pdev)
sec_qm_uninit(qm);
}
+static const struct dev_pm_ops sec_pm_ops = {
+ SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
+};
+
static const struct pci_error_handlers sec_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
.slot_reset = hisi_qm_dev_slot_reset,
@@ -1033,6 +1086,7 @@ static struct pci_driver sec_pci_driver = {
.err_handler = &sec_err_handler,
.sriov_configure = hisi_qm_sriov_configure,
.shutdown = hisi_qm_dev_shutdown,
+ .driver.pm = &sec_pm_ops,
};
static void sec_register_debugfs(void)
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index f8482ceebf2a..7148201ce76e 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>
@@ -107,6 +108,14 @@
#define HZIP_DELAY_1_US 1
#define HZIP_POLL_TIMEOUT_US 1000
+/* clock gating */
+#define HZIP_PEH_CFG_AUTO_GATE 0x3011A8
+#define HZIP_PEH_CFG_AUTO_GATE_EN BIT(0)
+#define HZIP_CORE_GATED_EN GENMASK(15, 8)
+#define HZIP_CORE_GATED_OOO_EN BIT(29)
+#define HZIP_CLOCK_GATED_EN (HZIP_CORE_GATED_EN | \
+ HZIP_CORE_GATED_OOO_EN)
+
static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;
@@ -312,6 +321,22 @@ static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
pci_err(qm->pdev, "failed to close sva prefetch\n");
}
+static void hisi_zip_enable_clock_gate(struct hisi_qm *qm)
+{
+ u32 val;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ val = readl(qm->io_base + HZIP_CLOCK_GATE_CTRL);
+ val |= HZIP_CLOCK_GATED_EN;
+ writel(val, qm->io_base + HZIP_CLOCK_GATE_CTRL);
+
+ val = readl(qm->io_base + HZIP_PEH_CFG_AUTO_GATE);
+ val |= HZIP_PEH_CFG_AUTO_GATE_EN;
+ writel(val, qm->io_base + HZIP_PEH_CFG_AUTO_GATE);
+}
+
static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
void __iomem *base = qm->io_base;
@@ -359,6 +384,8 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
+ hisi_zip_enable_clock_gate(qm);
+
return 0;
}
@@ -423,17 +450,14 @@ static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
return &hisi_zip->qm;
}
-static u32 clear_enable_read(struct ctrl_debug_file *file)
+static u32 clear_enable_read(struct hisi_qm *qm)
{
- struct hisi_qm *qm = file_to_qm(file);
-
return readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
HZIP_SOFT_CTRL_CNT_CLR_CE_BIT;
}
-static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
+static int clear_enable_write(struct hisi_qm *qm, u32 val)
{
- struct hisi_qm *qm = file_to_qm(file);
u32 tmp;
if (val != 1 && val != 0)
@@ -450,22 +474,33 @@ static ssize_t hisi_zip_ctrl_debug_read(struct file *filp, char __user *buf,
size_t count, loff_t *pos)
{
struct ctrl_debug_file *file = filp->private_data;
+ struct hisi_qm *qm = file_to_qm(file);
char tbuf[HZIP_BUF_SIZE];
u32 val;
int ret;
+ ret = hisi_qm_get_dfx_access(qm);
+ if (ret)
+ return ret;
+
spin_lock_irq(&file->lock);
switch (file->index) {
case HZIP_CLEAR_ENABLE:
- val = clear_enable_read(file);
+ val = clear_enable_read(qm);
break;
default:
- spin_unlock_irq(&file->lock);
- return -EINVAL;
+ goto err_input;
}
spin_unlock_irq(&file->lock);
+
+ hisi_qm_put_dfx_access(qm);
ret = scnprintf(tbuf, sizeof(tbuf), "%u\n", val);
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+
+err_input:
+ spin_unlock_irq(&file->lock);
+ hisi_qm_put_dfx_access(qm);
+ return -EINVAL;
}
static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
@@ -473,6 +508,7 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
size_t count, loff_t *pos)
{
struct ctrl_debug_file *file = filp->private_data;
+ struct hisi_qm *qm = file_to_qm(file);
char tbuf[HZIP_BUF_SIZE];
unsigned long val;
int len, ret;
@@ -491,10 +527,14 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
if (kstrtoul(tbuf, 0, &val))
return -EFAULT;
+ ret = hisi_qm_get_dfx_access(qm);
+ if (ret)
+ return ret;
+
spin_lock_irq(&file->lock);
switch (file->index) {
case HZIP_CLEAR_ENABLE:
- ret = clear_enable_write(file, val);
+ ret = clear_enable_write(qm, val);
if (ret)
goto err_input;
break;
@@ -502,12 +542,12 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
ret = -EINVAL;
goto err_input;
}
- spin_unlock_irq(&file->lock);
- return count;
+ ret = count;
err_input:
spin_unlock_irq(&file->lock);
+ hisi_qm_put_dfx_access(qm);
return ret;
}
@@ -538,6 +578,15 @@ static int zip_debugfs_atomic64_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get,
zip_debugfs_atomic64_set, "%llu\n");
+static int hisi_zip_regs_show(struct seq_file *s, void *unused)
+{
+ hisi_qm_regs_dump(s, s->private);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs);
+
static int hisi_zip_core_debug_init(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
@@ -560,9 +609,11 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
regset->regs = hzip_dfx_regs;
regset->nregs = ARRAY_SIZE(hzip_dfx_regs);
regset->base = qm->io_base + core_offsets[i];
+ regset->dev = dev;
tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
- debugfs_create_regset32("regs", 0444, tmp_d, regset);
+ debugfs_create_file("regs", 0444, tmp_d, regset,
+ &hisi_zip_regs_fops);
}
return 0;
@@ -898,6 +949,8 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_qm_alg_unregister;
}
+ hisi_qm_pm_init(qm);
+
return 0;
err_qm_alg_unregister:
@@ -920,6 +973,7 @@ static void hisi_zip_remove(struct pci_dev *pdev)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
+ hisi_qm_pm_uninit(qm);
hisi_qm_wait_task_finish(qm, &zip_devices);
hisi_qm_alg_unregister(qm, &zip_devices);
@@ -932,6 +986,10 @@ static void hisi_zip_remove(struct pci_dev *pdev)
hisi_zip_qm_uninit(qm);
}
+static const struct dev_pm_ops hisi_zip_pm_ops = {
+ SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
+};
+
static const struct pci_error_handlers hisi_zip_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
.slot_reset = hisi_qm_dev_slot_reset,
@@ -948,6 +1006,7 @@ static struct pci_driver hisi_zip_pci_driver = {
hisi_qm_sriov_configure : NULL,
.err_handler = &hisi_zip_err_handler,
.shutdown = hisi_qm_dev_shutdown,
+ .driver.pm = &hisi_zip_pm_ops,
};
static void hisi_zip_register_debugfs(void)
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index d6a7784d2988..d19e5ffb5104 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -170,15 +170,19 @@ static struct dcp *global_sdcp;
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
+ int dma_err;
struct dcp *sdcp = global_sdcp;
const int chan = actx->chan;
uint32_t stat;
unsigned long ret;
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
-
dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
DMA_TO_DEVICE);
+ dma_err = dma_mapping_error(sdcp->dev, desc_phys);
+ if (dma_err)
+ return dma_err;
+
reinit_completion(&sdcp->completion[chan]);
/* Clear status register. */
@@ -216,18 +220,29 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
struct skcipher_request *req, int init)
{
+ dma_addr_t key_phys, src_phys, dst_phys;
struct dcp *sdcp = global_sdcp;
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
int ret;
- dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
- 2 * AES_KEYSIZE_128,
- DMA_TO_DEVICE);
- dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
- DCP_BUF_SZ, DMA_TO_DEVICE);
- dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
- DCP_BUF_SZ, DMA_FROM_DEVICE);
+ key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
+ 2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
+ ret = dma_mapping_error(sdcp->dev, key_phys);
+ if (ret)
+ return ret;
+
+ src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
+ DCP_BUF_SZ, DMA_TO_DEVICE);
+ ret = dma_mapping_error(sdcp->dev, src_phys);
+ if (ret)
+ goto err_src;
+
+ dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
+ DCP_BUF_SZ, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(sdcp->dev, dst_phys);
+ if (ret)
+ goto err_dst;
if (actx->fill % AES_BLOCK_SIZE) {
dev_err(sdcp->dev, "Invalid block size!\n");
@@ -265,10 +280,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
ret = mxs_dcp_start_dma(actx);
aes_done_run:
+ dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
+err_dst:
+ dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
+err_src:
dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
DMA_TO_DEVICE);
- dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
- dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
return ret;
}
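The relabelled exit path above is the standard reverse-order unwind for chained DMA mappings: each successful dma_map_single() gains a matching unmap that later failures jump past. The bare pattern, as a sketch with hypothetical buffers:

static int example_two_mappings(struct device *dev, void *buf_a, size_t len_a,
				void *buf_b, size_t len_b)
{
	dma_addr_t a, b;
	int ret;

	a = dma_map_single(dev, buf_a, len_a, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, a);
	if (ret)
		return ret;		/* nothing mapped yet */

	b = dma_map_single(dev, buf_b, len_b, DMA_FROM_DEVICE);
	ret = dma_mapping_error(dev, b);
	if (ret)
		goto unmap_a;		/* only 'a' needs undoing */

	ret = 0;	/* ... use both mappings here ... */

	dma_unmap_single(dev, b, len_b, DMA_FROM_DEVICE);
unmap_a:
	dma_unmap_single(dev, a, len_a, DMA_TO_DEVICE);
	return ret;
}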
@@ -283,21 +300,20 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
struct scatterlist *dst = req->dst;
struct scatterlist *src = req->src;
- const int nents = sg_nents(req->src);
+ int dst_nents = sg_nents(dst);
const int out_off = DCP_BUF_SZ;
uint8_t *in_buf = sdcp->coh->aes_in_buf;
uint8_t *out_buf = sdcp->coh->aes_out_buf;
- uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
uint32_t dst_off = 0;
+ uint8_t *src_buf = NULL;
uint32_t last_out_len = 0;
uint8_t *key = sdcp->coh->aes_key;
int ret = 0;
- int split = 0;
- unsigned int i, len, clen, rem = 0, tlen = 0;
+ unsigned int i, len, clen, tlen = 0;
int init = 0;
bool limit_hit = false;
@@ -315,7 +331,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
}
- for_each_sg(req->src, src, nents, i) {
+ for_each_sg(req->src, src, sg_nents(src), i) {
src_buf = sg_virt(src);
len = sg_dma_len(src);
tlen += len;
@@ -340,34 +356,17 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
* submit the buffer.
*/
if (actx->fill == out_off || sg_is_last(src) ||
- limit_hit) {
+ limit_hit) {
ret = mxs_dcp_run_aes(actx, req, init);
if (ret)
return ret;
init = 0;
- out_tmp = out_buf;
+ sg_pcopy_from_buffer(dst, dst_nents, out_buf,
+ actx->fill, dst_off);
+ dst_off += actx->fill;
last_out_len = actx->fill;
- while (dst && actx->fill) {
- if (!split) {
- dst_buf = sg_virt(dst);
- dst_off = 0;
- }
- rem = min(sg_dma_len(dst) - dst_off,
- actx->fill);
-
- memcpy(dst_buf + dst_off, out_tmp, rem);
- out_tmp += rem;
- dst_off += rem;
- actx->fill -= rem;
-
- if (dst_off == sg_dma_len(dst)) {
- dst = sg_next(dst);
- split = 0;
- } else {
- split = 1;
- }
- }
+ actx->fill = 0;
}
} while (len);
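sg_pcopy_from_buffer() from lib/scatterlist.c does the page- and entry-boundary bookkeeping the removed loop did by hand: it copies a linear buffer into a scatterlist starting at a byte offset and returns the number of bytes copied. In isolation (sketch; the error handling is an assumption, the driver above relies on the destination being large enough):

	/* copy 'fill' bytes from out_buf into dst, dst_off bytes in */
	size_t copied = sg_pcopy_from_buffer(dst, sg_nents(dst),
					     out_buf, actx->fill, dst_off);
	if (copied != actx->fill)
		return -ENOSPC;		/* destination list too short */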
@@ -557,6 +556,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
DCP_BUF_SZ, DMA_TO_DEVICE);
+ ret = dma_mapping_error(sdcp->dev, buf_phys);
+ if (ret)
+ return ret;
+
/* Fill in the DMA descriptor. */
desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
MXS_DCP_CONTROL0_INTERRUPT |
@@ -589,6 +592,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
if (rctx->fini) {
digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(sdcp->dev, digest_phys);
+ if (ret)
+ goto done_run;
+
desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
desc->payload = digest_phys;
}
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 0dd4c6b157de..9b968ac4ee7b 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1175,9 +1175,9 @@ static int omap_aes_probe(struct platform_device *pdev)
spin_lock_init(&dd->lock);
INIT_LIST_HEAD(&dd->list);
- spin_lock(&list_lock);
+ spin_lock_bh(&list_lock);
list_add_tail(&dd->list, &dev_list);
- spin_unlock(&list_lock);
+ spin_unlock_bh(&list_lock);
/* Initialize crypto engine */
dd->engine = crypto_engine_alloc_init(dev, 1);
@@ -1264,9 +1264,9 @@ static int omap_aes_remove(struct platform_device *pdev)
if (!dd)
return -ENODEV;
- spin_lock(&list_lock);
+ spin_lock_bh(&list_lock);
list_del(&dd->list);
- spin_unlock(&list_lock);
+ spin_unlock_bh(&list_lock);
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
diff --git a/drivers/crypto/omap-crypto.c b/drivers/crypto/omap-crypto.c
index 31bdb1d76d11..a4cc6bf146ec 100644
--- a/drivers/crypto/omap-crypto.c
+++ b/drivers/crypto/omap-crypto.c
@@ -210,7 +210,7 @@ void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
buf = sg_virt(sg);
pages = get_order(len);
- if (orig && (flags & OMAP_CRYPTO_COPY_MASK))
+ if (orig && (flags & OMAP_CRYPTO_DATA_COPIED))
omap_crypto_copy_data(sg, orig, offset, len);
if (flags & OMAP_CRYPTO_DATA_COPIED)
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index bc8631363d72..be77656864e3 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -1033,9 +1033,9 @@ static int omap_des_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&dd->list);
- spin_lock(&list_lock);
+ spin_lock_bh(&list_lock);
list_add_tail(&dd->list, &dev_list);
- spin_unlock(&list_lock);
+ spin_unlock_bh(&list_lock);
/* Initialize des crypto engine */
dd->engine = crypto_engine_alloc_init(dev, 1);
@@ -1094,9 +1094,9 @@ static int omap_des_remove(struct platform_device *pdev)
if (!dd)
return -ENODEV;
- spin_lock(&list_lock);
+ spin_lock_bh(&list_lock);
list_del(&dd->list);
- spin_unlock(&list_lock);
+ spin_unlock_bh(&list_lock);
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index dd53ad9987b0..f6bf53c00b61 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -105,7 +105,6 @@
#define FLAGS_FINAL 1
#define FLAGS_DMA_ACTIVE 2
#define FLAGS_OUTPUT_READY 3
-#define FLAGS_INIT 4
#define FLAGS_CPU 5
#define FLAGS_DMA_READY 6
#define FLAGS_AUTO_XOR 7
@@ -368,24 +367,6 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req)
hash[i] = le32_to_cpup((__le32 *)in + i);
}
-static int omap_sham_hw_init(struct omap_sham_dev *dd)
-{
- int err;
-
- err = pm_runtime_resume_and_get(dd->dev);
- if (err < 0) {
- dev_err(dd->dev, "failed to get sync: %d\n", err);
- return err;
- }
-
- if (!test_bit(FLAGS_INIT, &dd->flags)) {
- set_bit(FLAGS_INIT, &dd->flags);
- dd->err = 0;
- }
-
- return 0;
-}
-
static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
int final, int dma)
{
@@ -1093,11 +1074,14 @@ static int omap_sham_hash_one_req(struct crypto_engine *engine, void *areq)
dev_dbg(dd->dev, "hash-one: op: %u, total: %u, digcnt: %zd, final: %d",
ctx->op, ctx->total, ctx->digcnt, final);
- dd->req = req;
-
- err = omap_sham_hw_init(dd);
- if (err)
+ err = pm_runtime_resume_and_get(dd->dev);
+ if (err < 0) {
+ dev_err(dd->dev, "failed to get sync: %d\n", err);
return err;
+ }
+
+ dd->err = 0;
+ dd->req = req;
if (ctx->digcnt)
dd->pdata->copy_hash(req, 0);
@@ -1736,7 +1720,7 @@ static void omap_sham_done_task(unsigned long data)
if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
goto finish;
} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
- if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
+ if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
omap_sham_update_dma_stop(dd);
if (dd->err) {
err = dd->err;
@@ -2129,7 +2113,6 @@ static int omap_sham_probe(struct platform_device *pdev)
dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;
pm_runtime_enable(dev);
- pm_runtime_irq_safe(dev);
err = pm_runtime_get_sync(dev);
if (err < 0) {
@@ -2144,9 +2127,9 @@ static int omap_sham_probe(struct platform_device *pdev)
(rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
(rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
- spin_lock(&sham.lock);
+ spin_lock_bh(&sham.lock);
list_add_tail(&dd->list, &sham.dev_list);
- spin_unlock(&sham.lock);
+ spin_unlock_bh(&sham.lock);
dd->engine = crypto_engine_alloc_init(dev, 1);
if (!dd->engine) {
@@ -2194,10 +2177,11 @@ err_algs:
err_engine_start:
crypto_engine_exit(dd->engine);
err_engine:
- spin_lock(&sham.lock);
+ spin_lock_bh(&sham.lock);
list_del(&dd->list);
- spin_unlock(&sham.lock);
+ spin_unlock_bh(&sham.lock);
err_pm:
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
@@ -2215,9 +2199,9 @@ static int omap_sham_remove(struct platform_device *pdev)
dd = platform_get_drvdata(pdev);
if (!dd)
return -ENODEV;
- spin_lock(&sham.lock);
+ spin_lock_bh(&sham.lock);
list_del(&dd->list);
- spin_unlock(&sham.lock);
+ spin_unlock_bh(&sham.lock);
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
crypto_unregister_ahash(
@@ -2225,6 +2209,7 @@ static int omap_sham_remove(struct platform_device *pdev)
dd->pdata->algs_info[i].registered--;
}
tasklet_kill(&dd->done_task);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
if (!dd->polling_mode)
@@ -2235,32 +2220,11 @@ static int omap_sham_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int omap_sham_suspend(struct device *dev)
-{
- pm_runtime_put_sync(dev);
- return 0;
-}
-
-static int omap_sham_resume(struct device *dev)
-{
- int err = pm_runtime_resume_and_get(dev);
- if (err < 0) {
- dev_err(dev, "failed to get sync: %d\n", err);
- return err;
- }
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume);
-
static struct platform_driver omap_sham_driver = {
.probe = omap_sham_probe,
.remove = omap_sham_remove,
.driver = {
.name = "omap-sham",
- .pm = &omap_sham_pm_ops,
.of_match_table = omap_sham_of_match,
},
};
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
index 3524ddd48930..33d8e50dcbda 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -161,7 +161,7 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
}
-static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
{
return 0;
}
@@ -210,21 +210,21 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->fw_mmp_name = ADF_4XXX_MMP;
hw_data->init_admin_comms = adf_init_admin_comms;
hw_data->exit_admin_comms = adf_exit_admin_comms;
- hw_data->disable_iov = adf_disable_sriov;
hw_data->send_admin_init = adf_send_admin_init;
hw_data->init_arb = adf_init_arb;
hw_data->exit_arb = adf_exit_arb;
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
- hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
hw_data->reset_device = adf_reset_flr;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
hw_data->uof_get_num_objs = uof_get_num_objs;
hw_data->uof_get_name = uof_get_name;
hw_data->uof_get_ae_mask = uof_get_ae_mask;
hw_data->set_msix_rttable = set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
+ hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c
index a8805c815d16..359fb7989dfb 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c
@@ -221,16 +221,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Set DMA identifier */
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
- dev_err(&pdev->dev, "No usable DMA configuration.\n");
- ret = -EFAULT;
- goto out_err;
- } else {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- }
- } else {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration.\n");
+ goto out_err;
}
/* Get accelerator capabilities mask */
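dma_set_mask_and_coherent() merges the streaming and coherent mask setup into one call, and on current kernels a wide mask does not fail where a narrower one would have succeeded, so the removed 32-bit fallback was dead code. The equivalence, as a sketch:

	/* dma_set_mask_and_coherent(dev, mask) is equivalent to: */
	ret = dma_set_mask(dev, mask);			/* streaming DMA */
	if (!ret)
		ret = dma_set_coherent_mask(dev, mask);	/* coherent allocs */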
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index 1dd64af22bea..3027c01bc89e 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -111,11 +111,6 @@ static u32 get_pf2vf_offset(u32 i)
return ADF_C3XXX_PF2VF_OFFSET(i);
}
-static u32 get_vintmsk_offset(u32 i)
-{
- return ADF_C3XXX_VINTMSK_OFFSET(i);
-}
-
static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
@@ -159,8 +154,10 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
ADF_C3XXX_SMIA1_MASK);
}
-static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
{
+ spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
return 0;
}
@@ -193,8 +190,6 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
- hw_data->get_pf2vf_offset = get_pf2vf_offset;
- hw_data->get_vintmsk_offset = get_vintmsk_offset;
hw_data->get_admin_info = adf_gen2_get_admin_info;
hw_data->get_arb_info = adf_gen2_get_arb_info;
hw_data->get_sku = get_sku;
@@ -203,16 +198,18 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->init_admin_comms = adf_init_admin_comms;
hw_data->exit_admin_comms = adf_exit_admin_comms;
hw_data->configure_iov_threads = configure_iov_threads;
- hw_data->disable_iov = adf_disable_sriov;
hw_data->send_admin_init = adf_send_admin_init;
hw_data->init_arb = adf_init_arb;
hw_data->exit_arb = adf_exit_arb;
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
- hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
hw_data->reset_device = adf_reset_flr;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
+ hw_data->get_pf2vf_offset = get_pf2vf_offset;
+ hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
index fece8e38025a..86ee02a86789 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
@@ -29,7 +29,6 @@
#define ADF_C3XXX_ERRSSMSH_EN BIT(3)
#define ADF_C3XXX_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
-#define ADF_C3XXX_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04))
/* AE to function mapping */
#define ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS 48
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index 7fb3343ae8b0..cc6e75dc60de 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -159,17 +159,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* set dma identifier */
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
- dev_err(&pdev->dev, "No usable DMA configuration\n");
- ret = -EFAULT;
- goto out_err_disable;
- } else {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- }
-
- } else {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration\n");
+ goto out_err_disable;
}
if (pci_request_regions(pdev, ADF_C3XXX_DEVICE_NAME)) {
@@ -208,12 +201,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state\n");
ret = -ENOMEM;
- goto out_err_free_reg;
+ goto out_err_disable_aer;
}
ret = qat_crypto_dev_config(accel_dev);
if (ret)
- goto out_err_free_reg;
+ goto out_err_disable_aer;
ret = adf_dev_init(accel_dev);
if (ret)
@@ -229,6 +222,8 @@ out_err_dev_stop:
adf_dev_stop(accel_dev);
out_err_dev_shutdown:
adf_dev_shutdown(accel_dev);
+out_err_disable_aer:
+ adf_disable_aer(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index 15f6b9bdfb22..3e69b520e82f 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -52,11 +52,6 @@ static u32 get_pf2vf_offset(u32 i)
return ADF_C3XXXIOV_PF2VF_OFFSET;
}
-static u32 get_vintmsk_offset(u32 i)
-{
- return ADF_C3XXXIOV_VINTMSK_OFFSET;
-}
-
static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
{
return 0;
@@ -81,10 +76,10 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
hw_data->enable_error_correction = adf_vf_void_noop;
hw_data->init_admin_comms = adf_vf_int_noop;
hw_data->exit_admin_comms = adf_vf_void_noop;
- hw_data->send_admin_init = adf_vf2pf_init;
+ hw_data->send_admin_init = adf_vf2pf_notify_init;
hw_data->init_arb = adf_vf_int_noop;
hw_data->exit_arb = adf_vf_void_noop;
- hw_data->disable_iov = adf_vf2pf_shutdown;
+ hw_data->disable_iov = adf_vf2pf_notify_shutdown;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_num_accels = get_num_accels;
@@ -92,11 +87,10 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_pf2vf_offset = get_pf2vf_offset;
- hw_data->get_vintmsk_offset = get_vintmsk_offset;
hw_data->get_sku = get_sku;
hw_data->enable_ints = adf_vf_void_noop;
- hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+ hw_data->enable_pfvf_comms = adf_enable_vf2pf_comms;
+ hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
hw_data->dev_class->instances++;
adf_devmgr_update_class_index(hw_data);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
index 7945a9cd1c60..f5de4ce66014 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
@@ -13,7 +13,6 @@
#define ADF_C3XXXIOV_ETR_BAR 0
#define ADF_C3XXXIOV_ETR_MAX_BANKS 1
#define ADF_C3XXXIOV_PF2VF_OFFSET 0x200
-#define ADF_C3XXXIOV_VINTMSK_OFFSET 0x208
void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 067ca5e17d38..1df1b868978d 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -141,17 +141,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* set dma identifier */
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
- dev_err(&pdev->dev, "No usable DMA configuration\n");
- ret = -EFAULT;
- goto out_err_disable;
- } else {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- }
-
- } else {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration\n");
+ goto out_err_disable;
}
if (pci_request_regions(pdev, ADF_C3XXXVF_DEVICE_NAME)) {
@@ -218,6 +211,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
+ adf_flush_vf_wq(accel_dev);
adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
adf_cleanup_accel(accel_dev);
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
index 30337390513c..b023c80873bb 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -113,11 +113,6 @@ static u32 get_pf2vf_offset(u32 i)
return ADF_C62X_PF2VF_OFFSET(i);
}
-static u32 get_vintmsk_offset(u32 i)
-{
- return ADF_C62X_VINTMSK_OFFSET(i);
-}
-
static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
@@ -161,8 +156,10 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
ADF_C62X_SMIA1_MASK);
}
-static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
{
+ spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
return 0;
}
@@ -195,8 +192,6 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
- hw_data->get_pf2vf_offset = get_pf2vf_offset;
- hw_data->get_vintmsk_offset = get_vintmsk_offset;
hw_data->get_admin_info = adf_gen2_get_admin_info;
hw_data->get_arb_info = adf_gen2_get_arb_info;
hw_data->get_sku = get_sku;
@@ -205,16 +200,18 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->init_admin_comms = adf_init_admin_comms;
hw_data->exit_admin_comms = adf_exit_admin_comms;
hw_data->configure_iov_threads = configure_iov_threads;
- hw_data->disable_iov = adf_disable_sriov;
hw_data->send_admin_init = adf_send_admin_init;
hw_data->init_arb = adf_init_arb;
hw_data->exit_arb = adf_exit_arb;
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
- hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
hw_data->reset_device = adf_reset_flr;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
+ hw_data->get_pf2vf_offset = get_pf2vf_offset;
+ hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
index 53d3cb577f5b..e6664bd20c91 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
@@ -30,7 +30,6 @@
#define ADF_C62X_ERRSSMSH_EN BIT(3)
#define ADF_C62X_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
-#define ADF_C62X_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04))
/* AE to function mapping */
#define ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS 80
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 1f5de442e1e6..bf251dfe74b3 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -159,17 +159,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* set dma identifier */
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
- dev_err(&pdev->dev, "No usable DMA configuration\n");
- ret = -EFAULT;
- goto out_err_disable;
- } else {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- }
-
- } else {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration\n");
+ goto out_err_disable;
}
if (pci_request_regions(pdev, ADF_C62X_DEVICE_NAME)) {
@@ -208,12 +201,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state\n");
ret = -ENOMEM;
- goto out_err_free_reg;
+ goto out_err_disable_aer;
}
ret = qat_crypto_dev_config(accel_dev);
if (ret)
- goto out_err_free_reg;
+ goto out_err_disable_aer;
ret = adf_dev_init(accel_dev);
if (ret)
@@ -229,6 +222,8 @@ out_err_dev_stop:
adf_dev_stop(accel_dev);
out_err_dev_shutdown:
adf_dev_shutdown(accel_dev);
+out_err_disable_aer:
+ adf_disable_aer(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index d231583428c9..3bee3e467363 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -52,11 +52,6 @@ static u32 get_pf2vf_offset(u32 i)
return ADF_C62XIOV_PF2VF_OFFSET;
}
-static u32 get_vintmsk_offset(u32 i)
-{
- return ADF_C62XIOV_VINTMSK_OFFSET;
-}
-
static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
{
return 0;
@@ -81,10 +76,10 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
hw_data->enable_error_correction = adf_vf_void_noop;
hw_data->init_admin_comms = adf_vf_int_noop;
hw_data->exit_admin_comms = adf_vf_void_noop;
- hw_data->send_admin_init = adf_vf2pf_init;
+ hw_data->send_admin_init = adf_vf2pf_notify_init;
hw_data->init_arb = adf_vf_int_noop;
hw_data->exit_arb = adf_vf_void_noop;
- hw_data->disable_iov = adf_vf2pf_shutdown;
+ hw_data->disable_iov = adf_vf2pf_notify_shutdown;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_num_accels = get_num_accels;
@@ -92,11 +87,10 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_pf2vf_offset = get_pf2vf_offset;
- hw_data->get_vintmsk_offset = get_vintmsk_offset;
hw_data->get_sku = get_sku;
hw_data->enable_ints = adf_vf_void_noop;
- hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+ hw_data->enable_pfvf_comms = adf_enable_vf2pf_comms;
+ hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
hw_data->dev_class->instances++;
adf_devmgr_update_class_index(hw_data);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
index a6c04cf7a43c..794778c48678 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
+++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
@@ -13,7 +13,6 @@
#define ADF_C62XIOV_ETR_BAR 0
#define ADF_C62XIOV_ETR_MAX_BANKS 1
#define ADF_C62XIOV_PF2VF_OFFSET 0x200
-#define ADF_C62XIOV_VINTMSK_OFFSET 0x208
void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index 51ea88c0b17d..8103bd81d617 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -141,17 +141,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* set dma identifier */
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
- dev_err(&pdev->dev, "No usable DMA configuration\n");
- ret = -EFAULT;
- goto out_err_disable;
- } else {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- }
-
- } else {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration\n");
+ goto out_err_disable;
}
if (pci_request_regions(pdev, ADF_C62XVF_DEVICE_NAME)) {
@@ -218,6 +211,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
+ adf_flush_vf_wq(accel_dev);
adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
adf_cleanup_accel(accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index ac435b44f1d2..38c0af6d4e43 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -18,8 +18,6 @@
#define ADF_4XXX_DEVICE_NAME "4xxx"
#define ADF_4XXX_PCI_DEVICE_ID 0x4940
#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
-#define ADF_ERRSOU3 (0x3A000 + 0x0C)
-#define ADF_ERRSOU5 (0x3A000 + 0xD8)
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
@@ -156,7 +154,6 @@ struct adf_hw_device_data {
u32 (*get_num_aes)(struct adf_hw_device_data *self);
u32 (*get_num_accels)(struct adf_hw_device_data *self);
u32 (*get_pf2vf_offset)(u32 i);
- u32 (*get_vintmsk_offset)(u32 i);
void (*get_arb_info)(struct arb_info *arb_csrs_info);
void (*get_admin_info)(struct admin_info *admin_csrs_info);
enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
@@ -174,7 +171,7 @@ struct adf_hw_device_data {
bool enable);
void (*enable_ints)(struct adf_accel_dev *accel_dev);
void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
- int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
+ int (*enable_pfvf_comms)(struct adf_accel_dev *accel_dev);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
char *(*uof_get_name)(u32 obj_num);
@@ -227,7 +224,6 @@ struct adf_fw_loader_data {
struct adf_accel_vf_info {
struct adf_accel_dev *accel_dev;
- struct tasklet_struct vf2pf_bh_tasklet;
struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
struct ratelimit_state vf2pf_ratelimit;
u32 vf_nr;
@@ -249,6 +245,8 @@ struct adf_accel_dev {
struct adf_accel_pci accel_pci_dev;
union {
struct {
+ /* protects VF2PF interrupts access */
+ spinlock_t vf2pf_ints_lock;
/* vf_info is non-zero when SR-IOV is init'ed */
struct adf_accel_vf_info *vf_info;
} pf;
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index d2ae293d0df6..ed3e40bc56eb 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -194,7 +194,7 @@ int adf_enable_aer(struct adf_accel_dev *accel_dev)
EXPORT_SYMBOL_GPL(adf_enable_aer);
/**
- * adf_disable_aer() - Enable Advance Error Reporting for acceleration device
+ * adf_disable_aer() - Disable Advance Error Reporting for acceleration device
* @accel_dev: Pointer to acceleration device.
*
* Function disables PCI Advance Error Reporting for the
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index c61476553728..4261749fae8d 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -193,22 +193,23 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
u32 vf_mask);
+void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
+ u32 vf_mask);
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
u32 vf_mask);
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info);
-int adf_vf2pf_init(struct adf_accel_dev *accel_dev);
-void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
int adf_init_pf_wq(void);
void adf_exit_pf_wq(void);
int adf_init_vf_wq(void);
void adf_exit_vf_wq(void);
+void adf_flush_vf_wq(struct adf_accel_dev *accel_dev);
#else
-static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
-{
- return 0;
-}
+#define adf_sriov_configure NULL
static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
@@ -222,12 +223,12 @@ static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
}
-static inline int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
{
return 0;
}
-static inline void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
{
}
@@ -249,5 +250,9 @@ static inline void adf_exit_vf_wq(void)
{
}
+static inline void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
+{
+}
+
#endif
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 744c40351428..60bc7b991d35 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -61,6 +61,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
struct service_hndl *service;
struct list_head *list_itr;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ int ret;
if (!hw_data) {
dev_err(&GET_DEV(accel_dev),
@@ -88,8 +89,6 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
return -EFAULT;
}
- hw_data->enable_ints(accel_dev);
-
if (adf_ae_init(accel_dev)) {
dev_err(&GET_DEV(accel_dev),
"Failed to initialise Acceleration Engine\n");
@@ -110,6 +109,13 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
}
set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+ hw_data->enable_ints(accel_dev);
+ hw_data->enable_error_correction(accel_dev);
+
+ ret = hw_data->enable_pfvf_comms(accel_dev);
+ if (ret)
+ return ret;
+
/*
* Subservice initialisation is divided into two stages: init and start.
* This is to facilitate any ordering dependencies between services
@@ -126,9 +132,6 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
set_bit(accel_dev->accel_id, service->init_status);
}
- hw_data->enable_error_correction(accel_dev);
- hw_data->enable_vf2pf_comms(accel_dev);
-
return 0;
}
EXPORT_SYMBOL_GPL(adf_dev_init);
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index e3ad5587be49..c678d5c531aa 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -15,6 +15,14 @@
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
+#define ADF_MAX_NUM_VFS 32
+#define ADF_ERRSOU3 (0x3A000 + 0x0C)
+#define ADF_ERRSOU5 (0x3A000 + 0xD8)
+#define ADF_ERRMSK3 (0x3A000 + 0x1C)
+#define ADF_ERRMSK5 (0x3A000 + 0xDC)
+#define ADF_ERR_REG_VF2PF_L(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
+#define ADF_ERR_REG_VF2PF_U(vf_src) (((vf_src) & 0x0000FFFF) << 16)
+
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
@@ -71,14 +79,23 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_bar *pmisc =
&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
- void __iomem *pmisc_bar_addr = pmisc->virt_addr;
- u32 vf_mask;
+ void __iomem *pmisc_addr = pmisc->virt_addr;
+ u32 errsou3, errsou5, errmsk3, errmsk5;
+ unsigned long vf_mask;
/* Get the interrupt sources triggered by VFs */
- vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) &
- 0x0000FFFF) << 16) |
- ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3) &
- 0x01FFFE00) >> 9);
+ errsou3 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU3);
+ errsou5 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU5);
+ vf_mask = ADF_ERR_REG_VF2PF_L(errsou3);
+ vf_mask |= ADF_ERR_REG_VF2PF_U(errsou5);
+
+ /* To avoid adding duplicate entries to work queue, clear
+ * vf_int_mask_sets bits that are already masked in ERRMSK register.
+ */
+ errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK3);
+ errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK5);
+ vf_mask &= ~ADF_ERR_REG_VF2PF_L(errmsk3);
+ vf_mask &= ~ADF_ERR_REG_VF2PF_U(errmsk5);
if (vf_mask) {
struct adf_accel_vf_info *vf_info;
@@ -86,15 +103,13 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
int i;
/* Disable VF2PF interrupts for VFs with pending ints */
- adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+ adf_disable_vf2pf_interrupts_irq(accel_dev, vf_mask);
/*
- * Schedule tasklets to handle VF2PF interrupt BHs
- * unless the VF is malicious and is attempting to
- * flood the host OS with VF2PF interrupts.
+ * Handle VF2PF interrupt unless the VF is malicious and
+ * is attempting to flood the host OS with VF2PF interrupts.
*/
- for_each_set_bit(i, (const unsigned long *)&vf_mask,
- (sizeof(vf_mask) * BITS_PER_BYTE)) {
+ for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
vf_info = accel_dev->pf.vf_info + i;
if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
@@ -104,8 +119,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
continue;
}
- /* Tasklet will re-enable ints from this VF */
- tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet);
+ adf_schedule_vf2pf_handler(vf_info);
irq_handled = true;
}
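As a worked example of the new mask macros, the register values below are invented; the point is how ERRSOU3 bits 9..24 and ERRSOU5 bits 0..15 fold into one contiguous per-VF bitmask:

    u32 errsou3 = 0x00000600;   /* bits 9 and 10 set: VF0 and VF1 */
    u32 errsou5 = 0x00000001;   /* bit 0 set: VF16 */
    unsigned long vf_mask;

    vf_mask  = ADF_ERR_REG_VF2PF_L(errsou3); /* (0x600 & 0x01FFFE00) >> 9 == 0x3 */
    vf_mask |= ADF_ERR_REG_VF2PF_U(errsou5); /* (0x1 & 0x0000FFFF) << 16 == 0x10000 */
    /* vf_mask == 0x10003: VFs 0, 1 and 16 pending, one bit per VF */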
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
index a1b77bd7a894..976b9ab7617c 100644
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
@@ -11,28 +11,8 @@
#define ADF_DH895XCC_ERRMSK5 (ADF_DH895XCC_EP_OFFSET + 0xDC)
#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
-void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
-{
- struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- void __iomem *pmisc_bar_addr =
- pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
-
- ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
-}
-
-void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
-{
- struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- void __iomem *pmisc_bar_addr =
- pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
-
- ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
-}
-
-void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
- u32 vf_mask)
+static void __adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+ u32 vf_mask)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_bar *pmisc =
@@ -55,7 +35,17 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
}
}
-void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+ __adf_enable_vf2pf_interrupts(accel_dev, vf_mask);
+ spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+static void __adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+ u32 vf_mask)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_bar *pmisc =
@@ -78,6 +68,22 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
}
}
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+ __adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+ spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+ spin_lock(&accel_dev->pf.vf2pf_ints_lock);
+ __adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+ spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
+}
+
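A short usage note on the variants created here, taken from the call sites in the hunks above: process-context callers take the irqsave versions, while the MSI-X handler, which already runs with local interrupts disabled, uses the cheaper _irq variant:

    /* process context, e.g. re-enabling after a handled VF request */
    adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));

    /* hard-IRQ context (adf_msix_isr_ae): plain spin_lock suffices */
    adf_disable_vf2pf_interrupts_irq(accel_dev, vf_mask);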
static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
@@ -186,7 +192,6 @@ int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
return ret;
}
-EXPORT_SYMBOL_GPL(adf_iov_putmsg);
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
{
@@ -216,7 +221,7 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
(ADF_PF2VF_MSGTYPE_VERSION_RESP <<
ADF_PF2VF_MSGTYPE_SHIFT) |
- (ADF_PFVF_COMPATIBILITY_VERSION <<
+ (ADF_PFVF_COMPAT_THIS_VERSION <<
ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
dev_dbg(&GET_DEV(accel_dev),
@@ -226,19 +231,19 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
if (vf_compat_ver < hw_data->min_iov_compat_ver) {
dev_err(&GET_DEV(accel_dev),
"VF (vers %d) incompatible with PF (vers %d)\n",
- vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+ vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
- } else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
+ } else if (vf_compat_ver > ADF_PFVF_COMPAT_THIS_VERSION) {
dev_err(&GET_DEV(accel_dev),
"VF (vers %d) compat with PF (vers %d) unkn.\n",
- vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+ vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
} else {
dev_dbg(&GET_DEV(accel_dev),
"VF (vers %d) compatible with PF (vers %d)\n",
- vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+ vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
resp |= ADF_PF2VF_VF_COMPATIBLE <<
ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
}
@@ -251,7 +256,7 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
(ADF_PF2VF_MSGTYPE_VERSION_RESP <<
ADF_PF2VF_MSGTYPE_SHIFT) |
- (ADF_PFVF_COMPATIBILITY_VERSION <<
+ (ADF_PFVF_COMPAT_THIS_VERSION <<
ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
resp |= ADF_PF2VF_VF_COMPATIBLE <<
ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
@@ -284,6 +289,7 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
/* re-enable interrupt on PF from this VF */
adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
+
return;
err:
dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
@@ -313,8 +319,10 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
- msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
- BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
+ msg |= ADF_PFVF_COMPAT_THIS_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+ BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);
+
+ reinit_completion(&accel_dev->vf.iov_msg_completion);
/* Send request from VF to PF */
ret = adf_iov_putmsg(accel_dev, msg, 0);
@@ -338,14 +346,16 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
break;
case ADF_PF2VF_VF_COMPAT_UNKNOWN:
/* VF is newer than PF and decides whether it is compatible */
- if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
+ if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver) {
+ accel_dev->vf.compatible = ADF_PF2VF_VF_COMPATIBLE;
break;
+ }
fallthrough;
case ADF_PF2VF_VF_INCOMPATIBLE:
dev_err(&GET_DEV(accel_dev),
"PF (vers %d) and VF (vers %d) are not compatible\n",
accel_dev->vf.pf_version,
- ADF_PFVF_COMPATIBILITY_VERSION);
+ ADF_PFVF_COMPAT_THIS_VERSION);
return -EINVAL;
default:
dev_err(&GET_DEV(accel_dev),
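Distilled into a hypothetical helper (the macros are the ones used in the hunks above), the PF-side compatibility decision reads:

    static u8 pfvf_compat_result(u8 vf_compat_ver, u8 min_compat)
    {
        if (vf_compat_ver < min_compat)
            return ADF_PF2VF_VF_INCOMPATIBLE;       /* VF too old */
        if (vf_compat_ver > ADF_PFVF_COMPAT_THIS_VERSION)
            return ADF_PF2VF_VF_COMPAT_UNKNOWN;     /* newer VF decides */
        return ADF_PF2VF_VF_COMPATIBLE;
    }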
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
index 0690c031bfce..ffd43aa50b57 100644
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
@@ -52,7 +52,7 @@
* IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg).
*/
-#define ADF_PFVF_COMPATIBILITY_VERSION 0x1 /* PF<->VF compat */
+#define ADF_PFVF_COMPAT_THIS_VERSION 0x1 /* PF<->VF compat */
/* PF->VF messages */
#define ADF_PF2VF_INT BIT(0)
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index 8c822c2861c2..90ec057f9183 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -24,9 +24,8 @@ static void adf_iov_send_resp(struct work_struct *work)
kfree(pf2vf_resp);
}
-static void adf_vf2pf_bh_handler(void *data)
+void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info)
{
- struct adf_accel_vf_info *vf_info = (struct adf_accel_vf_info *)data;
struct adf_pf2vf_resp *pf2vf_resp;
pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
@@ -52,9 +51,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
vf_info->accel_dev = accel_dev;
vf_info->vf_nr = i;
- tasklet_init(&vf_info->vf2pf_bh_tasklet,
- (void *)adf_vf2pf_bh_handler,
- (unsigned long)vf_info);
mutex_init(&vf_info->pf2vf_lock);
ratelimit_state_init(&vf_info->vf2pf_ratelimit,
DEFAULT_RATELIMIT_INTERVAL,
@@ -110,8 +106,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
hw_data->configure_iov_threads(accel_dev, false);
for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
- tasklet_disable(&vf->vf2pf_bh_tasklet);
- tasklet_kill(&vf->vf2pf_bh_tasklet);
mutex_destroy(&vf->pf2vf_lock);
}
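The tasklets removed here are replaced by one work item per request. The queueing half of adf_schedule_vf2pf_handler() is not shown in full above; a plausible completion, based on the visible kzalloc() and the adf_iov_send_resp() worker (the struct field and workqueue names are assumptions):

    pf2vf_resp->vf_info = vf_info;
    INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
    queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
    /* adf_iov_send_resp() then calls adf_vf2pf_req_hndl() and frees the
     * item, so the GFP_ATOMIC allocation is the only IRQ-side cost.
     */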
diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
index e85bd62d134a..3e25fac051b2 100644
--- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
@@ -5,14 +5,14 @@
#include "adf_pf2vf_msg.h"
/**
- * adf_vf2pf_init() - send init msg to PF
+ * adf_vf2pf_notify_init() - send init msg to PF
* @accel_dev: Pointer to acceleration VF device.
*
 * Function sends an init message from the VF to the PF.
*
* Return: 0 on success, error code otherwise.
*/
-int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
{
u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
@@ -25,17 +25,17 @@ int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
return 0;
}
-EXPORT_SYMBOL_GPL(adf_vf2pf_init);
+EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
/**
- * adf_vf2pf_shutdown() - send shutdown msg to PF
+ * adf_vf2pf_notify_shutdown() - send shutdown msg to PF
* @accel_dev: Pointer to acceleration VF device.
*
 * Function sends a shutdown message from the VF to the PF.
*
* Return: void
*/
-void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
{
u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
(ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
@@ -45,4 +45,4 @@ void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
dev_err(&GET_DEV(accel_dev),
"Failed to send Shutdown event to PF\n");
}
-EXPORT_SYMBOL_GPL(adf_vf2pf_shutdown);
+EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index 888388acb6bd..7828a6573f3e 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -18,6 +18,7 @@
#include "adf_pf2vf_msg.h"
#define ADF_VINTSOU_OFFSET 0x204
+#define ADF_VINTMSK_OFFSET 0x208
#define ADF_VINTSOU_BUN BIT(0)
#define ADF_VINTSOU_PF2VF BIT(1)
@@ -28,6 +29,27 @@ struct adf_vf_stop_data {
struct work_struct work;
};
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *pmisc_bar_addr =
+ pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+ ADF_CSR_WR(pmisc_bar_addr, ADF_VINTMSK_OFFSET, 0x0);
+}
+
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *pmisc_bar_addr =
+ pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+ ADF_CSR_WR(pmisc_bar_addr, ADF_VINTMSK_OFFSET, 0x2);
+}
+EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
+
static int adf_enable_msi(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
@@ -160,11 +182,21 @@ static irqreturn_t adf_isr(int irq, void *privdata)
struct adf_bar *pmisc =
&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
void __iomem *pmisc_bar_addr = pmisc->virt_addr;
- u32 v_int;
+ bool handled = false;
+ u32 v_int, v_mask;
/* Read VF INT source CSR to determine the source of VF interrupt */
v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);
+ /* Read VF INT mask CSR to determine which sources are masked */
+ v_mask = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTMSK_OFFSET);
+
+ /*
+ * Recompute v_int ignoring sources that are masked. This is to
+ * avoid rescheduling the tasklet for interrupts already handled.
+ */
+ v_int &= ~v_mask;
+
/* Check for PF2VF interrupt */
if (v_int & ADF_VINTSOU_PF2VF) {
/* Disable PF to VF interrupt */
@@ -172,7 +204,7 @@ static irqreturn_t adf_isr(int irq, void *privdata)
/* Schedule tasklet to handle interrupt BH */
tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
- return IRQ_HANDLED;
+ handled = true;
}
/* Check bundle interrupt */
@@ -184,10 +216,10 @@ static irqreturn_t adf_isr(int irq, void *privdata)
csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
bank->bank_number, 0);
tasklet_hi_schedule(&bank->resp_handler);
- return IRQ_HANDLED;
+ handled = true;
}
- return IRQ_NONE;
+ return handled ? IRQ_HANDLED : IRQ_NONE;
}
static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
@@ -285,6 +317,30 @@ err_out:
}
EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
+/**
+ * adf_flush_vf_wq() - Flush workqueue for VF
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function disables the PF/VF interrupts on the VF so that no new messages
+ * are received, and then flushes the workqueue 'adf_vf_stop_wq'.
+ *
+ * Return: void.
+ */
+void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
+{
+ adf_disable_pf2vf_interrupts(accel_dev);
+
+ flush_workqueue(adf_vf_stop_wq);
+}
+EXPORT_SYMBOL_GPL(adf_flush_vf_wq);
+
+/**
+ * adf_init_vf_wq() - Init workqueue for VF
+ *
+ * Function initializes the workqueue 'adf_vf_stop_wq' for the VF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
int __init adf_init_vf_wq(void)
{
adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
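Putting the adf_isr() changes together, a condensed sketch of the reworked VF interrupt handler (function name ours, CSR plumbing elided):

    static irqreturn_t adf_vf_isr_sketch(void __iomem *pmisc_bar_addr)
    {
        bool handled = false;
        u32 v_int, v_mask;

        v_int  = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);
        v_mask = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTMSK_OFFSET);
        v_int &= ~v_mask;       /* ignore sources already being handled */

        if (v_int & ADF_VINTSOU_PF2VF)
            handled = true;     /* mask PF2VF, schedule pf2vf_bh_tasklet */
        if (v_int & ADF_VINTSOU_BUN)
            handled = true;     /* ack bundle, schedule resp_handler */

        return handled ? IRQ_HANDLED : IRQ_NONE;
    }

Both sources can now be serviced from a single interrupt instead of returning after the first hit.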
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 7dd7cd6c3ef8..0a9ce365a544 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -131,11 +131,6 @@ static u32 get_pf2vf_offset(u32 i)
return ADF_DH895XCC_PF2VF_OFFSET(i);
}
-static u32 get_vintmsk_offset(u32 i)
-{
- return ADF_DH895XCC_VINTMSK_OFFSET(i);
-}
-
static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
@@ -180,8 +175,10 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
ADF_DH895XCC_SMIA1_MASK);
}
-static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
{
+ spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
return 0;
}
@@ -213,8 +210,6 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->get_num_aes = get_num_aes;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
- hw_data->get_pf2vf_offset = get_pf2vf_offset;
- hw_data->get_vintmsk_offset = get_vintmsk_offset;
hw_data->get_admin_info = adf_gen2_get_admin_info;
hw_data->get_arb_info = adf_gen2_get_arb_info;
hw_data->get_sram_bar_id = get_sram_bar_id;
@@ -224,15 +219,17 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->init_admin_comms = adf_init_admin_comms;
hw_data->exit_admin_comms = adf_exit_admin_comms;
hw_data->configure_iov_threads = configure_iov_threads;
- hw_data->disable_iov = adf_disable_sriov;
hw_data->send_admin_init = adf_send_admin_init;
hw_data->init_arb = adf_init_arb;
hw_data->exit_arb = adf_exit_arb;
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
- hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
hw_data->reset_device = adf_reset_sbr;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+ hw_data->get_pf2vf_offset = get_pf2vf_offset;
+ hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index 4d613923d155..f99319cd4543 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -35,7 +35,6 @@
#define ADF_DH895XCC_ERRSSMSH_EN BIT(3)
#define ADF_DH895XCC_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
-#define ADF_DH895XCC_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04))
/* AE to function mapping */
#define ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS 96
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index a9ec4357144c..3976a81bd99b 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -159,17 +159,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* set dma identifier */
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
- dev_err(&pdev->dev, "No usable DMA configuration\n");
- ret = -EFAULT;
- goto out_err_disable;
- } else {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- }
-
- } else {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration\n");
+ goto out_err_disable;
}
if (pci_request_regions(pdev, ADF_DH895XCC_DEVICE_NAME)) {
@@ -208,12 +201,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state\n");
ret = -ENOMEM;
- goto out_err_free_reg;
+ goto out_err_disable_aer;
}
ret = qat_crypto_dev_config(accel_dev);
if (ret)
- goto out_err_free_reg;
+ goto out_err_disable_aer;
ret = adf_dev_init(accel_dev);
if (ret)
@@ -229,6 +222,8 @@ out_err_dev_stop:
adf_dev_stop(accel_dev);
out_err_dev_shutdown:
adf_dev_shutdown(accel_dev);
+out_err_disable_aer:
+ adf_disable_aer(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
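The probe-time DMA setup above is a straight modernization: dma_set_mask_and_coherent() sets the streaming and coherent masks in one call, so the old pci_set_dma_mask()/pci_set_consistent_dma_mask() fallback ladder disappears, and the mask shrinks from a claimed 64 bits to the 48 bits this device can address. The resulting idiom, in isolation:

    #include <linux/dma-mapping.h>

    ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
    if (ret) {
        dev_err(&pdev->dev, "No usable DMA configuration\n");
        goto out_err_disable;
    }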
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index f14fb82ed6df..7c6ed6bc8abf 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -52,11 +52,6 @@ static u32 get_pf2vf_offset(u32 i)
return ADF_DH895XCCIOV_PF2VF_OFFSET;
}
-static u32 get_vintmsk_offset(u32 i)
-{
- return ADF_DH895XCCIOV_VINTMSK_OFFSET;
-}
-
static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
{
return 0;
@@ -81,10 +76,10 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
hw_data->enable_error_correction = adf_vf_void_noop;
hw_data->init_admin_comms = adf_vf_int_noop;
hw_data->exit_admin_comms = adf_vf_void_noop;
- hw_data->send_admin_init = adf_vf2pf_init;
+ hw_data->send_admin_init = adf_vf2pf_notify_init;
hw_data->init_arb = adf_vf_int_noop;
hw_data->exit_arb = adf_vf_void_noop;
- hw_data->disable_iov = adf_vf2pf_shutdown;
+ hw_data->disable_iov = adf_vf2pf_notify_shutdown;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_num_accels = get_num_accels;
@@ -92,11 +87,10 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_pf2vf_offset = get_pf2vf_offset;
- hw_data->get_vintmsk_offset = get_vintmsk_offset;
hw_data->get_sku = get_sku;
hw_data->enable_ints = adf_vf_void_noop;
- hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+ hw_data->enable_pfvf_comms = adf_enable_vf2pf_comms;
+ hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
hw_data->dev_class->instances++;
adf_devmgr_update_class_index(hw_data);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
index 2bfcc67f8f39..306ebb71a408 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
@@ -13,7 +13,6 @@
#define ADF_DH895XCCIOV_ETR_BAR 0
#define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
#define ADF_DH895XCCIOV_PF2VF_OFFSET 0x200
-#define ADF_DH895XCCIOV_VINTMSK_OFFSET 0x208
void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 29999da716cc..99d90f3ea2b7 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -141,17 +141,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* set dma identifier */
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
- dev_err(&pdev->dev, "No usable DMA configuration\n");
- ret = -EFAULT;
- goto out_err_disable;
- } else {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- }
-
- } else {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration\n");
+ goto out_err_disable;
}
if (pci_request_regions(pdev, ADF_DH895XCCVF_DEVICE_NAME)) {
@@ -218,6 +211,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
+ adf_flush_vf_wq(accel_dev);
adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
adf_cleanup_accel(accel_dev);
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 080955a1dd9c..e2375d992308 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -187,9 +187,9 @@ static int virtcrypto_init_vqs(struct virtio_crypto *vi)
if (ret)
goto err_free;
- get_online_cpus();
+ cpus_read_lock();
virtcrypto_set_affinity(vi);
- put_online_cpus();
+ cpus_read_unlock();
return 0;
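get_online_cpus()/put_online_cpus() were the legacy names for the CPU-hotplug read lock; the replacements are a one-to-one rename and keep CPUs from going offline across the affinity setup:

    #include <linux/cpu.h>

    cpus_read_lock();               /* no CPU can be unplugged here */
    virtcrypto_set_affinity(vi);    /* safe to bind vqs to online CPUs */
    cpus_read_unlock();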
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
index 9f937b125ab0..60ccf3e90d7d 100644
--- a/drivers/firmware/smccc/smccc.c
+++ b/drivers/firmware/smccc/smccc.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/arm-smccc.h>
#include <linux/kernel.h>
+#include <linux/platform_device.h>
#include <asm/archrandom.h>
static u32 smccc_version = ARM_SMCCC_VERSION_1_0;
@@ -42,3 +43,19 @@ u32 arm_smccc_get_version(void)
return smccc_version;
}
EXPORT_SYMBOL_GPL(arm_smccc_get_version);
+
+static int __init smccc_devices_init(void)
+{
+ struct platform_device *pdev;
+
+ if (smccc_trng_available) {
+ pdev = platform_device_register_simple("smccc_trng", -1,
+ NULL, 0);
+ if (IS_ERR(pdev))
+ pr_err("smccc_trng: could not register device: %ld\n",
+ PTR_ERR(pdev));
+ }
+
+ return 0;
+}
+device_initcall(smccc_devices_init);
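This initcall only registers the device; it binds by name to the new arm_smccc_trng hwrng driver. The consumer side looks roughly like the following schematic (probe body and hwrng registration elided, not the driver's verbatim code):

    static int smccc_trng_probe(struct platform_device *pdev)
    {
        /* register a struct hwrng whose read op issues the SMCCC
         * TRNG call
         */
        return 0;
    }

    static struct platform_driver smccc_trng_driver = {
        .driver = {
            .name = "smccc_trng",   /* matches the device created above */
        },
        .probe = smccc_trng_probe,
    };
    module_platform_driver(smccc_trng_driver);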