author    Dave Jiang <dave.jiang@intel.com>    2024-09-05 15:35:46 -0700
committer Dave Jiang <dave.jiang@intel.com>    2024-09-12 08:38:01 -0700
commit    8d8081cecfb9940beeb4a8a700db34e615a96056 (patch)
tree      5ec4230778a9d3d11335178a8eb43387719d77dd /drivers/cxl/core/memdev.c
parent    40a895fd9a358eea16901026360bd2cd3ca691a7 (diff)
cxl: Move mailbox related bits to the same context
Create a new 'struct cxl_mailbox' and move all mailbox related bits to it.
This allows isolation of all CXL mailbox data in order to export some of the
calls to external kernel callers while avoiding the export of CXL driver
specific bits such as device states. The allocation of 'struct cxl_mailbox'
is also split out with cxl_mailbox_init() so the mailbox can be created
independently.

Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Reviewed-by: Alejandro Lucero <alucerop@amd.com>
Reviewed-by: Fan Ni <fan.ni@samsung.com>
Reviewed-by: Alison Schofield <alison.schofield@intel.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Link: https://patch.msgid.link/20240905223711.1990186-3-dave.jiang@intel.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
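For orientation, a minimal sketch of what this change implies for callers. Only
payload_size and mbox_mutex are visible in the hunks below; the host pointer,
the #includes, and the cxl_mailbox_init() signature are assumptions based on
the commit message, not the authoritative definitions from the CXL headers.

#include <linux/device.h>
#include <linux/mutex.h>
/* struct cxl_dev_state comes from the CXL driver headers (assumed cxlmem.h) */

/* Hypothetical sketch of the new container, not the upstream definition */
struct cxl_mailbox {
	struct device *host;		/* assumed: device that owns the mailbox */
	size_t payload_size;		/* moved from cxl_memdev_state (see payload_max_show) */
	struct mutex mbox_mutex;	/* moved from cxl_memdev_state; serializes commands */
};

/*
 * cxl_mailbox_init() is named in the commit message as the split-out
 * init helper; this signature is an assumption for illustration only.
 */
int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host);

/*
 * Callers that used to reach into cxl_memdev_state now take the lock
 * through the mailbox embedded in cxl_dev_state, as the hunks below show.
 */
static void example_mbox_locking(struct cxl_dev_state *cxlds)
{
	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;

	mutex_lock(&cxl_mbox->mbox_mutex);
	/* ... inspect or submit mailbox state ... */
	mutex_unlock(&cxl_mbox->mbox_mutex);
}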
Diffstat (limited to 'drivers/cxl/core/memdev.c')
-rw-r--r--    drivers/cxl/core/memdev.c    18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 0277726afd04..05bb84cb1274 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -58,7 +58,7 @@ static ssize_t payload_max_show(struct device *dev,
if (!mds)
return sysfs_emit(buf, "\n");
- return sysfs_emit(buf, "%zu\n", mds->payload_size);
+ return sysfs_emit(buf, "%zu\n", cxlds->cxl_mbox.payload_size);
}
static DEVICE_ATTR_RO(payload_max);
@@ -124,15 +124,16 @@ static ssize_t security_state_show(struct device *dev,
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
unsigned long state = mds->security.state;
int rc = 0;
/* sync with latest submission state */
- mutex_lock(&mds->mbox_mutex);
+ mutex_lock(&cxl_mbox->mbox_mutex);
if (mds->security.sanitize_active)
rc = sysfs_emit(buf, "sanitize\n");
- mutex_unlock(&mds->mbox_mutex);
+ mutex_unlock(&cxl_mbox->mbox_mutex);
if (rc)
return rc;
@@ -829,12 +830,13 @@ static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data,
{
struct cxl_memdev_state *mds = fwl->dd_handle;
struct cxl_mbox_transfer_fw *transfer;
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
if (!size)
return FW_UPLOAD_ERR_INVALID_SIZE;
mds->fw.oneshot = struct_size(transfer, data, size) <
- mds->payload_size;
+ cxl_mbox->payload_size;
if (cxl_mem_get_fw_info(mds))
return FW_UPLOAD_ERR_HW_ERROR;
@@ -854,6 +856,7 @@ static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data,
{
struct cxl_memdev_state *mds = fwl->dd_handle;
struct cxl_dev_state *cxlds = &mds->cxlds;
+ struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
struct cxl_memdev *cxlmd = cxlds->cxlmd;
struct cxl_mbox_transfer_fw *transfer;
struct cxl_mbox_cmd mbox_cmd;
@@ -877,7 +880,7 @@ static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data,
* sizeof(*transfer) is 128. These constraints imply that @cur_size
* will always be 128b aligned.
*/
- cur_size = min_t(size_t, size, mds->payload_size - sizeof(*transfer));
+ cur_size = min_t(size_t, size, cxl_mbox->payload_size - sizeof(*transfer));
remaining = size - cur_size;
size_in = struct_size(transfer, data, cur_size);
@@ -1059,16 +1062,17 @@ EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);
static void sanitize_teardown_notifier(void *data)
{
struct cxl_memdev_state *mds = data;
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct kernfs_node *state;
/*
* Prevent new irq triggered invocations of the workqueue and
* flush inflight invocations.
*/
- mutex_lock(&mds->mbox_mutex);
+ mutex_lock(&cxl_mbox->mbox_mutex);
state = mds->security.sanitize_node;
mds->security.sanitize_node = NULL;
- mutex_unlock(&mds->mbox_mutex);
+ mutex_unlock(&cxl_mbox->mbox_mutex);
cancel_delayed_work_sync(&mds->security.poll_dwork);
sysfs_put(state);