Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/nfit/intel.c     |  25
-rw-r--r--  drivers/cxl/Kconfig           |  18
-rw-r--r--  drivers/cxl/Makefile          |   2
-rw-r--r--  drivers/cxl/core/mbox.c       |  16
-rw-r--r--  drivers/cxl/core/pmem.c       |   7
-rw-r--r--  drivers/cxl/core/region.c     |  34
-rw-r--r--  drivers/cxl/cxl.h             |  11
-rw-r--r--  drivers/cxl/cxlmem.h          |  41
-rw-r--r--  drivers/cxl/pmem.c            |  43
-rw-r--r--  drivers/cxl/security.c        | 163
-rw-r--r--  drivers/nvdimm/Kconfig        |  12
-rw-r--r--  drivers/nvdimm/dimm_devs.c    |   9
-rw-r--r--  drivers/nvdimm/region.c       |  11
-rw-r--r--  drivers/nvdimm/region_devs.c  |  50
-rw-r--r--  drivers/nvdimm/security.c     |  43
15 files changed, 448 insertions, 37 deletions
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
index fa0e57e35162..3902759abcba 100644
--- a/drivers/acpi/nfit/intel.c
+++ b/drivers/acpi/nfit/intel.c
@@ -212,9 +212,6 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
return -ENOTTY;
- if (!cpu_cache_has_invalidate_memregion())
- return -EINVAL;
-
memcpy(nd_cmd.cmd.passphrase, key_data->data,
sizeof(nd_cmd.cmd.passphrase));
rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -229,9 +226,6 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
return -EIO;
}
- /* DIMM unlocked, invalidate all CPU caches before we read it */
- cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
-
return 0;
}
@@ -299,11 +293,6 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
if (!test_bit(cmd, &nfit_mem->dsm_mask))
return -ENOTTY;
- if (!cpu_cache_has_invalidate_memregion())
- return -EINVAL;
-
- /* flush all cache before we erase DIMM */
- cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
memcpy(nd_cmd.cmd.passphrase, key->data,
sizeof(nd_cmd.cmd.passphrase));
rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -322,8 +311,6 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
return -ENXIO;
}
- /* DIMM erased, invalidate all CPU caches before we read it */
- cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
return 0;
}
@@ -346,9 +333,6 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
return -ENOTTY;
- if (!cpu_cache_has_invalidate_memregion())
- return -EINVAL;
-
rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
if (rc < 0)
return rc;
@@ -362,8 +346,6 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
return -ENXIO;
}
- /* flush all cache before we make the nvdimms available */
- cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
return 0;
}
@@ -388,11 +370,6 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
return -ENOTTY;
- if (!cpu_cache_has_invalidate_memregion())
- return -EINVAL;
-
- /* flush all cache before we erase DIMM */
- cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
memcpy(nd_cmd.cmd.passphrase, nkey->data,
sizeof(nd_cmd.cmd.passphrase));
rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -770,5 +747,3 @@ static const struct nvdimm_fw_ops __intel_fw_ops = {
};
const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops;
-
-MODULE_IMPORT_NS(DEVMEM);
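
These removals relocate, rather than drop, the cache management: as the hunks below show, a successful unlock/erase/overwrite now marks the DIMM NDD_INCOHERENT in drivers/nvdimm/security.c, and nd_region_activate() flushes CPU caches once for the whole region before it is enabled.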
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 768ced3d6fe8..0ac53c422c31 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -111,4 +111,22 @@ config CXL_REGION
select MEMREGION
select GET_FREE_REGION
+config CXL_REGION_INVALIDATION_TEST
+ bool "CXL: Region Cache Management Bypass (TEST)"
+ depends on CXL_REGION
+ help
+ CXL Region management and security operations potentially invalidate
+ the content of CPU caches without notifying those caches to
+ invalidate the affected cachelines. The CXL Region driver attempts
+ to invalidate caches when those events occur. If that invalidation
+ fails, the region will fail to enable. Invalidation fails when the
+ CPU does not provide a cache invalidation mechanism; for example,
+ wbinvd usage is restricted to bare metal x86. For testing purposes,
+ toggling this option disables that data integrity safety and
+ proceeds with enabling regions when there might be conflicting
+ contents in the CPU cache.
+
+ If unsure, or if this kernel is meant for production environments,
+ say N.
+
endif
diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile
index a78270794150..db321f48ba52 100644
--- a/drivers/cxl/Makefile
+++ b/drivers/cxl/Makefile
@@ -9,5 +9,5 @@ obj-$(CONFIG_CXL_PORT) += cxl_port.o
cxl_mem-y := mem.o
cxl_pci-y := pci.o
cxl_acpi-y := acpi.o
-cxl_pmem-y := pmem.o
+cxl_pmem-y := pmem.o security.o
cxl_port-y := port.o
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 0c90f13870a4..35dd889f1d3a 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -65,6 +65,12 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
CXL_CMD(SCAN_MEDIA, 0x11, 0, 0),
CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0),
+ CXL_CMD(GET_SECURITY_STATE, 0, 0x4, 0),
+ CXL_CMD(SET_PASSPHRASE, 0x60, 0, 0),
+ CXL_CMD(DISABLE_PASSPHRASE, 0x40, 0, 0),
+ CXL_CMD(FREEZE_SECURITY, 0, 0, 0),
+ CXL_CMD(UNLOCK, 0x20, 0, 0),
+ CXL_CMD(PASSPHRASE_SECURE_ERASE, 0x40, 0, 0),
};
/*
@@ -698,6 +704,16 @@ int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
rc = 0;
}
+ /*
+ * Set up permanently kernel-exclusive commands, i.e. commands
+ * whose mechanism is driven through sysfs, keyctl, etc...
+ */
+ set_bit(CXL_MEM_COMMAND_ID_SET_PASSPHRASE, cxlds->exclusive_cmds);
+ set_bit(CXL_MEM_COMMAND_ID_DISABLE_PASSPHRASE, cxlds->exclusive_cmds);
+ set_bit(CXL_MEM_COMMAND_ID_UNLOCK, cxlds->exclusive_cmds);
+ set_bit(CXL_MEM_COMMAND_ID_PASSPHRASE_SECURE_ERASE,
+ cxlds->exclusive_cmds);
+
out:
kvfree(gsl);
return rc;
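
For reference, marking these command IDs in exclusive_cmds is what keeps them out of the raw userspace ioctl path. A minimal sketch of that kind of gate — the helper name is illustrative, not the exact call chain in mbox.c:

/* Hypothetical helper sketch: reject userspace submission of commands
 * the kernel has claimed for itself (sysfs/keyctl drive them instead).
 */
static int cxl_deny_exclusive_cmd(struct cxl_dev_state *cxlds,
				  struct cxl_mem_command *cmd)
{
	if (test_bit(cmd->info.id, cxlds->exclusive_cmds))
		return -EBUSY;
	return 0;
}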
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index 16446473d814..f3d2169b6731 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -216,6 +216,13 @@ static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_nvdimm_bridge *cxl_nvb,
dev->parent = &cxlmd->dev;
dev->bus = &cxl_bus_type;
dev->type = &cxl_nvdimm_type;
+ /*
+ * A "%llx" string is 17-bytes vs dimm_id that is max
+ * NVDIMM_KEY_DESC_LEN
+ */
+ BUILD_BUG_ON(sizeof(cxl_nvd->dev_id) < 17 ||
+ sizeof(cxl_nvd->dev_id) > NVDIMM_KEY_DESC_LEN);
+ sprintf(cxl_nvd->dev_id, "%llx", cxlmd->cxlds->serial);
return cxl_nvd;
}
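
The 17-byte figure is the worst-case "%llx" rendering of a u64: 16 hex digits plus a NUL terminator. A stand-alone illustration of the bound (hypothetical userspace test, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	char dev_id[19];		/* CXL_DEV_ID_LEN */
	uint64_t serial = UINT64_MAX;	/* worst case: ffffffffffffffff */
	int n;

	/* 16 hex digits + NUL = 17 bytes, safely under the 19-byte buffer */
	n = snprintf(dev_id, sizeof(dev_id), "%llx",
		     (unsigned long long)serial);
	printf("%s -> %d chars, %d bytes with NUL\n", dev_id, n, n + 1);
	return 0;
}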
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 1e61d1bafc0c..f75df35b9d3d 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -1403,6 +1403,8 @@ static int attach_target(struct cxl_region *cxlr, const char *decoder, int pos)
goto out;
down_read(&cxl_dpa_rwsem);
rc = cxl_region_attach(cxlr, to_cxl_endpoint_decoder(dev), pos);
+ if (rc == 0)
+ set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
up_read(&cxl_dpa_rwsem);
up_write(&cxl_region_rwsem);
out:
@@ -1958,6 +1960,30 @@ err_bridge:
return rc;
}
+static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
+{
+ if (!test_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags))
+ return 0;
+
+ if (!cpu_cache_has_invalidate_memregion()) {
+ if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
+ dev_warn(
+ &cxlr->dev,
+ "Bypassing cpu_cache_invalidate_memergion() for testing!\n");
+ clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
+ return 0;
+ } else {
+ dev_err(&cxlr->dev,
+ "Failed to synchronize CPU cache state\n");
+ return -ENXIO;
+ }
+ }
+
+ cpu_cache_invalidate_memregion(IORES_DESC_CXL);
+ clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
+ return 0;
+}
+
static int cxl_region_probe(struct device *dev)
{
struct cxl_region *cxlr = to_cxl_region(dev);
@@ -1973,14 +1999,21 @@ static int cxl_region_probe(struct device *dev)
if (p->state < CXL_CONFIG_COMMIT) {
dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
rc = -ENXIO;
+ goto out;
}
+ rc = cxl_region_invalidate_memregion(cxlr);
+
/*
* From this point on any path that changes the region's state away from
* CXL_CONFIG_COMMIT is also responsible for releasing the driver.
*/
+out:
up_read(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
switch (cxlr->mode) {
case CXL_DECODER_PMEM:
return devm_cxl_add_pmem_region(cxlr);
@@ -2008,4 +2041,5 @@ void cxl_region_exit(void)
}
MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS(DEVMEM);
MODULE_ALIAS_CXL(CXL_DEVICE_REGION);
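
MODULE_IMPORT_NS(DEVMEM) is needed because cpu_cache_has_invalidate_memregion() and cpu_cache_invalidate_memregion() are exported in the DEVMEM symbol namespace. On x86 the backend is essentially a guarded wbinvd; a sketch paraphrasing the arch side of this series (not line-for-line):

/* Sketch of the x86 backend, paraphrased from arch/x86 in this series */
bool cpu_cache_has_invalidate_memregion(void)
{
	/* wbinvd is only trusted on bare metal */
	return !cpu_feature_enabled(X86_FEATURE_HYPERVISOR);
}
EXPORT_SYMBOL_NS_GPL(cpu_cache_has_invalidate_memregion, DEVMEM);

int cpu_cache_invalidate_memregion(int res_desc)
{
	if (WARN_ON_ONCE(!cpu_cache_has_invalidate_memregion()))
		return -ENXIO;
	wbinvd_on_all_cpus();
	return 0;
}
EXPORT_SYMBOL_NS_GPL(cpu_cache_invalidate_memregion, DEVMEM);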
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 9a212ab3cae4..8b7fb33d368b 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -388,6 +388,12 @@ struct cxl_region_params {
int nr_targets;
};
+/*
+ * Flag whether this region needs to have its HPA span synchronized with
+ * CPU cache state at region activation time.
+ */
+#define CXL_REGION_F_INCOHERENT 0
+
/**
* struct cxl_region - CXL region
* @dev: This region's device
@@ -396,6 +402,7 @@ struct cxl_region_params {
* @type: Endpoint decoder target type
* @cxl_nvb: nvdimm bridge for coordinating @cxlr_pmem setup / shutdown
* @cxlr_pmem: (for pmem regions) cached copy of the nvdimm bridge
+ * @flags: Region state flags
* @params: active + config params for the region
*/
struct cxl_region {
@@ -405,6 +412,7 @@ struct cxl_region {
enum cxl_decoder_type type;
struct cxl_nvdimm_bridge *cxl_nvb;
struct cxl_pmem_region *cxlr_pmem;
+ unsigned long flags;
struct cxl_region_params params;
};
@@ -416,9 +424,12 @@ struct cxl_nvdimm_bridge {
struct nvdimm_bus_descriptor nd_desc;
};
+#define CXL_DEV_ID_LEN 19
+
struct cxl_nvdimm {
struct device dev;
struct cxl_memdev *cxlmd;
+ u8 dev_id[CXL_DEV_ID_LEN]; /* for nvdimm: 'serial' rendered as a hex string */
};
struct cxl_pmem_region_mapping {
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 35d485d041f0..710c7694bf9f 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -288,6 +288,12 @@ enum cxl_opcode {
CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS = 0x4303,
CXL_MBOX_OP_SCAN_MEDIA = 0x4304,
CXL_MBOX_OP_GET_SCAN_MEDIA = 0x4305,
+ CXL_MBOX_OP_GET_SECURITY_STATE = 0x4500,
+ CXL_MBOX_OP_SET_PASSPHRASE = 0x4501,
+ CXL_MBOX_OP_DISABLE_PASSPHRASE = 0x4502,
+ CXL_MBOX_OP_UNLOCK = 0x4503,
+ CXL_MBOX_OP_FREEZE_SECURITY = 0x4504,
+ CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE = 0x4505,
CXL_MBOX_OP_MAX = 0x10000
};
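
These opcodes are the device security command set from the CXL specification's mailbox command tables: Get Security State (4500h) through Passphrase Secure Erase (4505h).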
@@ -387,6 +393,41 @@ struct cxl_mem_command {
#define CXL_CMD_FLAG_FORCE_ENABLE BIT(0)
};
+#define CXL_PMEM_SEC_STATE_USER_PASS_SET 0x01
+#define CXL_PMEM_SEC_STATE_MASTER_PASS_SET 0x02
+#define CXL_PMEM_SEC_STATE_LOCKED 0x04
+#define CXL_PMEM_SEC_STATE_FROZEN 0x08
+#define CXL_PMEM_SEC_STATE_USER_PLIMIT 0x10
+#define CXL_PMEM_SEC_STATE_MASTER_PLIMIT 0x20
+
+/* set passphrase input payload */
+struct cxl_set_pass {
+ u8 type;
+ u8 reserved[31];
+ /* CXL field using NVDIMM define, same length */
+ u8 old_pass[NVDIMM_PASSPHRASE_LEN];
+ u8 new_pass[NVDIMM_PASSPHRASE_LEN];
+} __packed;
+
+/* disable passphrase input payload */
+struct cxl_disable_pass {
+ u8 type;
+ u8 reserved[31];
+ u8 pass[NVDIMM_PASSPHRASE_LEN];
+} __packed;
+
+/* passphrase secure erase payload */
+struct cxl_pass_erase {
+ u8 type;
+ u8 reserved[31];
+ u8 pass[NVDIMM_PASSPHRASE_LEN];
+} __packed;
+
+enum {
+ CXL_PMEM_SEC_PASS_MASTER = 0,
+ CXL_PMEM_SEC_PASS_USER,
+};
+
int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
size_t in_size, void *out, size_t out_size);
int cxl_dev_state_identify(struct cxl_dev_state *cxlds);
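
The three payloads are fixed-size wire formats. Assuming NVDIMM_PASSPHRASE_LEN is 32 (its value in include/linux/libnvdimm.h), their sizes work out as below; a compile-time check one could add:

/* Sketch: sanity-check the __packed wire formats, assuming
 * NVDIMM_PASSPHRASE_LEN == 32.
 */
static_assert(sizeof(struct cxl_set_pass) == 1 + 31 + 32 + 32);	/* 96 */
static_assert(sizeof(struct cxl_disable_pass) == 1 + 31 + 32);	/* 64 */
static_assert(sizeof(struct cxl_pass_erase) == 1 + 31 + 32);	/* 64 */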
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index 0910367a3ead..2fc8070b6a17 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -11,6 +11,8 @@
#include "cxlmem.h"
#include "cxl.h"
+extern const struct nvdimm_security_ops *cxl_security_ops;
+
static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
static void clear_exclusive(void *cxlds)
@@ -23,6 +25,41 @@ static void unregister_nvdimm(void *nvdimm)
nvdimm_delete(nvdimm);
}
+static ssize_t provider_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+
+ return sysfs_emit(buf, "%s\n", dev_name(&cxl_nvd->dev));
+}
+static DEVICE_ATTR_RO(provider);
+
+static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+ struct cxl_dev_state *cxlds = cxl_nvd->cxlmd->cxlds;
+
+ return sysfs_emit(buf, "%lld\n", cxlds->serial);
+}
+static DEVICE_ATTR_RO(id);
+
+static struct attribute *cxl_dimm_attributes[] = {
+ &dev_attr_id.attr,
+ &dev_attr_provider.attr,
+ NULL
+};
+
+static const struct attribute_group cxl_dimm_attribute_group = {
+ .name = "cxl",
+ .attrs = cxl_dimm_attributes,
+};
+
+static const struct attribute_group *cxl_dimm_attribute_groups[] = {
+ &cxl_dimm_attribute_group,
+ NULL
+};
+
static int cxl_nvdimm_probe(struct device *dev)
{
struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
@@ -42,8 +79,10 @@ static int cxl_nvdimm_probe(struct device *dev)
set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
- nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags,
- cmd_mask, 0, NULL);
+ nvdimm = __nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd,
+ cxl_dimm_attribute_groups, flags,
+ cmd_mask, 0, NULL, cxl_nvd->dev_id,
+ cxl_security_ops, NULL);
if (!nvdimm)
return -ENOMEM;
diff --git a/drivers/cxl/security.c b/drivers/cxl/security.c
new file mode 100644
index 000000000000..5484d4eecfd1
--- /dev/null
+++ b/drivers/cxl/security.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+#include <linux/libnvdimm.h>
+#include <asm/unaligned.h>
+#include <linux/module.h>
+#include <linux/async.h>
+#include <linux/slab.h>
+#include <linux/memregion.h>
+#include "cxlmem.h"
+#include "cxl.h"
+
+static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm,
+ enum nvdimm_passphrase_type ptype)
+{
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+ struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ unsigned long security_flags = 0;
+ u32 sec_out;
+ int rc;
+
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SECURITY_STATE, NULL, 0,
+ &sec_out, sizeof(sec_out));
+ if (rc < 0)
+ return 0;
+
+ if (ptype == NVDIMM_MASTER) {
+ if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)
+ set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
+ else
+ set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
+ if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PLIMIT)
+ set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
+ return security_flags;
+ }
+
+ if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
+ if (sec_out & CXL_PMEM_SEC_STATE_FROZEN ||
+ sec_out & CXL_PMEM_SEC_STATE_USER_PLIMIT)
+ set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
+
+ if (sec_out & CXL_PMEM_SEC_STATE_LOCKED)
+ set_bit(NVDIMM_SECURITY_LOCKED, &security_flags);
+ else
+ set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
+ } else {
+ set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
+ }
+
+ return security_flags;
+}
+
+static int cxl_pmem_security_change_key(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *old_data,
+ const struct nvdimm_key_data *new_data,
+ enum nvdimm_passphrase_type ptype)
+{
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+ struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_set_pass set_pass;
+ int rc;
+
+ set_pass.type = ptype == NVDIMM_MASTER ?
+ CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER;
+ memcpy(set_pass.old_pass, old_data->data, NVDIMM_PASSPHRASE_LEN);
+ memcpy(set_pass.new_pass, new_data->data, NVDIMM_PASSPHRASE_LEN);
+
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_PASSPHRASE,
+ &set_pass, sizeof(set_pass), NULL, 0);
+ return rc;
+}
+
+static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data,
+ enum nvdimm_passphrase_type ptype)
+{
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+ struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_disable_pass dis_pass;
+ int rc;
+
+ dis_pass.type = ptype == NVDIMM_MASTER ?
+ CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER;
+ memcpy(dis_pass.pass, key_data->data, NVDIMM_PASSPHRASE_LEN);
+
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_DISABLE_PASSPHRASE,
+ &dis_pass, sizeof(dis_pass), NULL, 0);
+ return rc;
+}
+
+static int cxl_pmem_security_disable(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data)
+{
+ return __cxl_pmem_security_disable(nvdimm, key_data, NVDIMM_USER);
+}
+
+static int cxl_pmem_security_disable_master(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data)
+{
+ return __cxl_pmem_security_disable(nvdimm, key_data, NVDIMM_MASTER);
+}
+
+static int cxl_pmem_security_freeze(struct nvdimm *nvdimm)
+{
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+ struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ return cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_FREEZE_SECURITY, NULL, 0, NULL, 0);
+}
+
+static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data)
+{
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+ struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ u8 pass[NVDIMM_PASSPHRASE_LEN];
+ int rc;
+
+ memcpy(pass, key_data->data, NVDIMM_PASSPHRASE_LEN);
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_UNLOCK,
+ pass, NVDIMM_PASSPHRASE_LEN, NULL, 0);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key,
+ enum nvdimm_passphrase_type ptype)
+{
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+ struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_pass_erase erase;
+ int rc;
+
+ erase.type = ptype == NVDIMM_MASTER ?
+ CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER;
+ memcpy(erase.pass, key->data, NVDIMM_PASSPHRASE_LEN);
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE,
+ &erase, sizeof(erase), NULL, 0);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static const struct nvdimm_security_ops __cxl_security_ops = {
+ .get_flags = cxl_pmem_get_security_flags,
+ .change_key = cxl_pmem_security_change_key,
+ .disable = cxl_pmem_security_disable,
+ .freeze = cxl_pmem_security_freeze,
+ .unlock = cxl_pmem_security_unlock,
+ .erase = cxl_pmem_security_passphrase_erase,
+ .disable_master = cxl_pmem_security_disable_master,
+};
+
+const struct nvdimm_security_ops *cxl_security_ops = &__cxl_security_ops;
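
Because cxl_nvd->dev_id is passed to __nvdimm_create() as the key description, userspace manages these ops the same way it does for NFIT DIMMs: load an encrypted-type kernel key whose description embeds the device serial, then write the operation and key id to the DIMM's 'security' sysfs attribute.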
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 5a29046e3319..79d93126453d 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -114,4 +114,16 @@ config NVDIMM_TEST_BUILD
core devm_memremap_pages() implementation and other
infrastructure.
+config NVDIMM_SECURITY_TEST
+ bool "Enable NVDIMM security unit tests"
+ depends on NVDIMM_KEYS
+ help
+ The NVDIMM and CXL subsystems support unit testing of their device
+ security state machines. The NVDIMM_SECURITY_TEST option disables CPU
+ cache maintenance operations around events like secure erase and
+ overwrite. Also, when enabled, the NVDIMM subsystem core helps the unit
+ test implement a mock state machine.
+
+ Select N if unsure.
+
endif
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index c7c980577491..1fc081dcf631 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -349,11 +349,18 @@ static ssize_t available_slots_show(struct device *dev,
}
static DEVICE_ATTR_RO(available_slots);
-__weak ssize_t security_show(struct device *dev,
+ssize_t security_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
+ /*
+ * For the test version we need to poll the "hardware" in order
+ * to get the updated status for unlock testing.
+ */
+ if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST))
+ nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
+
if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
return sprintf(buf, "overwrite\n");
if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 390123d293ea..88dc062af5f8 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -2,6 +2,7 @@
/*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
+#include <linux/memregion.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/device.h>
@@ -100,6 +101,16 @@ static void nd_region_remove(struct device *dev)
*/
sysfs_put(nd_region->bb_state);
nd_region->bb_state = NULL;
+
+ /*
+ * Try to flush caches here since a disabled region may be subject to
+ * secure erase while disabled, and previous dirty data should not be
+ * written back to a new instance of the region. This only matters on
+ * bare metal where security commands are available, so silent failure
+ * here is ok.
+ */
+ if (cpu_cache_has_invalidate_memregion())
+ cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
}
static int child_notify(struct device *dev, void *data)
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index e0875d369762..83dbf398ea84 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -59,9 +59,51 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
return 0;
}
+static int nd_region_invalidate_memregion(struct nd_region *nd_region)
+{
+ int i, incoherent = 0;
+
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
+ struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+ struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+ if (test_bit(NDD_INCOHERENT, &nvdimm->flags)) {
+ incoherent++;
+ break;
+ }
+ }
+
+ if (!incoherent)
+ return 0;
+
+ if (!cpu_cache_has_invalidate_memregion()) {
+ if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST)) {
+ dev_warn(
+ &nd_region->dev,
+ "Bypassing cpu_cache_invalidate_memergion() for testing!\n");
+ goto out;
+ } else {
+ dev_err(&nd_region->dev,
+ "Failed to synchronize CPU cache state\n");
+ return -ENXIO;
+ }
+ }
+
+ cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
+out:
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
+ struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+ struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+ clear_bit(NDD_INCOHERENT, &nvdimm->flags);
+ }
+
+ return 0;
+}
+
int nd_region_activate(struct nd_region *nd_region)
{
- int i, j, num_flush = 0;
+ int i, j, rc, num_flush = 0;
struct nd_region_data *ndrd;
struct device *dev = &nd_region->dev;
size_t flush_data_size = sizeof(void *);
@@ -85,6 +127,10 @@ int nd_region_activate(struct nd_region *nd_region)
}
nvdimm_bus_unlock(&nd_region->dev);
+ rc = nd_region_invalidate_memregion(nd_region);
+ if (rc)
+ return rc;
+
ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
if (!ndrd)
return -ENOMEM;
@@ -1222,3 +1268,5 @@ int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
}
+
+MODULE_IMPORT_NS(DEVMEM);
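
The DEVMEM namespace import moves here from nfit/intel.c along with the invalidation call sites: region activation, not the per-DIMM security ops, is now responsible for calling cpu_cache_invalidate_memregion().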
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index 8aefb60c42ff..a03e3c45f297 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -177,6 +177,10 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
|| !nvdimm->sec.flags)
return -EIO;
+ /* cxl_test needs this to pre-populate the security state */
+ if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST))
+ nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
+
/* No need to go further if security is disabled */
if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
return 0;
@@ -204,6 +208,8 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
rc = nvdimm->sec.ops->unlock(nvdimm, data);
dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
rc == 0 ? "success" : "fail");
+ if (rc == 0)
+ set_bit(NDD_INCOHERENT, &nvdimm->flags);
nvdimm_put_key(key);
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
@@ -239,7 +245,8 @@ static int check_security_state(struct nvdimm *nvdimm)
return 0;
}
-static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
+static int security_disable(struct nvdimm *nvdimm, unsigned int keyid,
+ enum nvdimm_passphrase_type pass_type)
{
struct device *dev = &nvdimm->dev;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
@@ -250,8 +257,13 @@ static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
- if (!nvdimm->sec.ops || !nvdimm->sec.ops->disable
- || !nvdimm->sec.flags)
+ if (!nvdimm->sec.ops || !nvdimm->sec.flags)
+ return -EOPNOTSUPP;
+
+ if (pass_type == NVDIMM_USER && !nvdimm->sec.ops->disable)
+ return -EOPNOTSUPP;
+
+ if (pass_type == NVDIMM_MASTER && !nvdimm->sec.ops->disable_master)
return -EOPNOTSUPP;
rc = check_security_state(nvdimm);
@@ -263,12 +275,21 @@ static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
if (!data)
return -ENOKEY;
- rc = nvdimm->sec.ops->disable(nvdimm, data);
- dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
+ if (pass_type == NVDIMM_MASTER) {
+ rc = nvdimm->sec.ops->disable_master(nvdimm, data);
+ dev_dbg(dev, "key: %d disable_master: %s\n", key_serial(key),
rc == 0 ? "success" : "fail");
+ } else {
+ rc = nvdimm->sec.ops->disable(nvdimm, data);
+ dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
+ rc == 0 ? "success" : "fail");
+ }
nvdimm_put_key(key);
- nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
+ if (pass_type == NVDIMM_MASTER)
+ nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
+ else
+ nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
return rc;
}
@@ -355,6 +376,8 @@ static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
return -ENOKEY;
rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
+ if (rc == 0)
+ set_bit(NDD_INCOHERENT, &nvdimm->flags);
dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
rc == 0 ? "success" : "fail");
@@ -389,6 +412,8 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
return -ENOKEY;
rc = nvdimm->sec.ops->overwrite(nvdimm, data);
+ if (rc == 0)
+ set_bit(NDD_INCOHERENT, &nvdimm->flags);
dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
rc == 0 ? "success" : "fail");
@@ -473,6 +498,7 @@ void nvdimm_security_overwrite_query(struct work_struct *work)
#define OPS \
C( OP_FREEZE, "freeze", 1), \
C( OP_DISABLE, "disable", 2), \
+ C( OP_DISABLE_MASTER, "disable_master", 2), \
C( OP_UPDATE, "update", 3), \
C( OP_ERASE, "erase", 2), \
C( OP_OVERWRITE, "overwrite", 2), \
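
With the new table entry, writing e.g. "disable_master <keyid>" to /sys/bus/nd/devices/nmemX/security routes through security_disable(nvdimm, key, NVDIMM_MASTER) below.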
@@ -524,7 +550,10 @@ ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len)
rc = nvdimm_security_freeze(nvdimm);
} else if (i == OP_DISABLE) {
dev_dbg(dev, "disable %u\n", key);
- rc = security_disable(nvdimm, key);
+ rc = security_disable(nvdimm, key, NVDIMM_USER);
+ } else if (i == OP_DISABLE_MASTER) {
+ dev_dbg(dev, "disable_master %u\n", key);
+ rc = security_disable(nvdimm, key, NVDIMM_MASTER);
} else if (i == OP_UPDATE || i == OP_MASTER_UPDATE) {
dev_dbg(dev, "%s %u %u\n", ops[i].name, key, newkey);
rc = security_update(nvdimm, key, newkey, i == OP_UPDATE