diff options
author | Dan Williams <dan.j.williams@intel.com> | 2023-06-25 17:16:51 -0700 |
---|---|---|
committer | Dan Williams <dan.j.williams@intel.com> | 2023-06-25 17:16:51 -0700 |
commit | aeaefabc59ec3c7edd6d0ca7add7aff2ff6f59d0 (patch) | |
tree | d733849f230b613a1741dae7d588efbcf700fb27 /drivers/cxl/core/memdev.c | |
parent | 867eab655d3b38740d8d6c24705af25e0b1dbdb6 (diff) | |
parent | 8f0220af58c3b73e9041377a23708d37600b33c1 (diff) | |
download | lwn-aeaefabc59ec3c7edd6d0ca7add7aff2ff6f59d0.tar.gz lwn-aeaefabc59ec3c7edd6d0ca7add7aff2ff6f59d0.zip |
Merge branch 'for-6.5/cxl-type-2' into for-6.5/cxl
Pick up the driver cleanups identified in preparation for CXL "type-2"
(accelerator) device support. The major change here from a conflict
generation perspective is the split of 'struct cxl_memdev_state' from
the core 'struct cxl_dev_state', since an accelerator may not care about
all the optional features that are standard on a CXL "type-3" (host-only
memory expander) device.
A silent conflict also occurs with the move of the endpoint port to be a
formal property of a 'struct cxl_memdev' rather than drvdata.
Diffstat (limited to 'drivers/cxl/core/memdev.c')
-rw-r--r-- | drivers/cxl/core/memdev.c | 173 |
1 file changed, 96 insertions, 77 deletions
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c index fd2e6b0f79c0..90237b9487a7 100644 --- a/drivers/cxl/core/memdev.c +++ b/drivers/cxl/core/memdev.c @@ -41,8 +41,11 @@ static ssize_t firmware_version_show(struct device *dev, { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); - return sysfs_emit(buf, "%.16s\n", cxlds->firmware_version); + if (!mds) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%.16s\n", mds->firmware_version); } static DEVICE_ATTR_RO(firmware_version); @@ -51,8 +54,11 @@ static ssize_t payload_max_show(struct device *dev, { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); - return sysfs_emit(buf, "%zu\n", cxlds->payload_size); + if (!mds) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%zu\n", mds->payload_size); } static DEVICE_ATTR_RO(payload_max); @@ -61,8 +67,11 @@ static ssize_t label_storage_size_show(struct device *dev, { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); - return sysfs_emit(buf, "%zu\n", cxlds->lsa_size); + if (!mds) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%zu\n", mds->lsa_size); } static DEVICE_ATTR_RO(label_storage_size); @@ -115,10 +124,11 @@ static ssize_t security_state_show(struct device *dev, { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); struct cxl_dev_state *cxlds = cxlmd->cxlds; - unsigned long state = cxlds->security.state; + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); u64 reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET); u32 pct = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg); u16 cmd = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg); + unsigned long state = mds->security.state; if (cmd == 
CXL_MBOX_OP_SANITIZE && pct != 100) return sysfs_emit(buf, "sanitize\n"); @@ -142,10 +152,10 @@ static ssize_t security_sanitize_store(struct device *dev, const char *buf, size_t len) { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_dev_state *cxlds = cxlmd->cxlds; - struct cxl_port *port = dev_get_drvdata(&cxlmd->dev); - ssize_t rc; + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + struct cxl_port *port = cxlmd->endpoint; bool sanitize; + ssize_t rc; if (kstrtobool(buf, &sanitize) || !sanitize) return -EINVAL; @@ -157,7 +167,7 @@ static ssize_t security_sanitize_store(struct device *dev, if (port->commit_end != -1) return -EBUSY; - rc = cxl_mem_sanitize(cxlds, CXL_MBOX_OP_SANITIZE); + rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SANITIZE); return rc ? rc : len; } @@ -169,8 +179,8 @@ static ssize_t security_erase_store(struct device *dev, const char *buf, size_t len) { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_dev_state *cxlds = cxlmd->cxlds; - struct cxl_port *port = dev_get_drvdata(&cxlmd->dev); + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + struct cxl_port *port = cxlmd->endpoint; ssize_t rc; bool erase; @@ -184,7 +194,7 @@ static ssize_t security_erase_store(struct device *dev, if (port->commit_end != -1) return -EBUSY; - rc = cxl_mem_sanitize(cxlds, CXL_MBOX_OP_SECURE_ERASE); + rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SECURE_ERASE); return rc ? 
rc : len; } @@ -224,7 +234,7 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd) struct cxl_port *port; int rc; - port = dev_get_drvdata(&cxlmd->dev); + port = cxlmd->endpoint; if (!port || !is_cxl_endpoint(port)) return -EINVAL; @@ -282,7 +292,7 @@ static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa) ctx = (struct cxl_dpa_to_region_context) { .dpa = dpa, }; - port = dev_get_drvdata(&cxlmd->dev); + port = cxlmd->endpoint; if (port && is_cxl_endpoint(port) && port->commit_end != -1) device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region); @@ -315,7 +325,7 @@ static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa) int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa) { - struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); struct cxl_mbox_inject_poison inject; struct cxl_poison_record record; struct cxl_mbox_cmd mbox_cmd; @@ -339,13 +349,13 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa) .size_in = sizeof(inject), .payload_in = &inject, }; - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); + rc = cxl_internal_send_cmd(mds, &mbox_cmd); if (rc) goto out; cxlr = cxl_dpa_to_region(cxlmd, dpa); if (cxlr) - dev_warn_once(cxlds->dev, + dev_warn_once(mds->cxlds.dev, "poison inject dpa:%#llx region: %s\n", dpa, dev_name(&cxlr->dev)); @@ -363,7 +373,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, CXL); int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa) { - struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); struct cxl_mbox_clear_poison clear; struct cxl_poison_record record; struct cxl_mbox_cmd mbox_cmd; @@ -396,14 +406,15 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa) .payload_in = &clear, }; - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); + rc = cxl_internal_send_cmd(mds, &mbox_cmd); if (rc) goto out; cxlr = cxl_dpa_to_region(cxlmd, dpa); if (cxlr) - dev_warn_once(cxlds->dev, "poison 
clear dpa:%#llx region: %s\n", - dpa, dev_name(&cxlr->dev)); + dev_warn_once(mds->cxlds.dev, + "poison clear dpa:%#llx region: %s\n", dpa, + dev_name(&cxlr->dev)); record = (struct cxl_poison_record) { .address = cpu_to_le64(dpa), @@ -494,17 +505,18 @@ EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, CXL); /** * set_exclusive_cxl_commands() - atomically disable user cxl commands - * @cxlds: The device state to operate on + * @mds: The device state to operate on * @cmds: bitmap of commands to mark exclusive * * Grab the cxl_memdev_rwsem in write mode to flush in-flight * invocations of the ioctl path and then disable future execution of * commands with the command ids set in @cmds. */ -void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds) +void set_exclusive_cxl_commands(struct cxl_memdev_state *mds, + unsigned long *cmds) { down_write(&cxl_memdev_rwsem); - bitmap_or(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds, + bitmap_or(mds->exclusive_cmds, mds->exclusive_cmds, cmds, CXL_MEM_COMMAND_ID_MAX); up_write(&cxl_memdev_rwsem); } @@ -512,13 +524,14 @@ EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, CXL); /** * clear_exclusive_cxl_commands() - atomically enable user cxl commands - * @cxlds: The device state to modify + * @mds: The device state to modify * @cmds: bitmap of commands to mark available for userspace */ -void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds) +void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds, + unsigned long *cmds) { down_write(&cxl_memdev_rwsem); - bitmap_andnot(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds, + bitmap_andnot(mds->exclusive_cmds, mds->exclusive_cmds, cmds, CXL_MEM_COMMAND_ID_MAX); up_write(&cxl_memdev_rwsem); } @@ -527,10 +540,10 @@ EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL); static void cxl_memdev_security_shutdown(struct device *dev) { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct 
cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); - if (cxlds->security.poll) - cancel_delayed_work_sync(&cxlds->security.poll_dwork); + if (mds->security.poll) + cancel_delayed_work_sync(&mds->security.poll_dwork); } static void cxl_memdev_shutdown(struct device *dev) @@ -618,10 +631,12 @@ static long cxl_memdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct cxl_memdev *cxlmd = file->private_data; + struct cxl_dev_state *cxlds; int rc = -ENXIO; down_read(&cxl_memdev_rwsem); - if (cxlmd->cxlds) + cxlds = cxlmd->cxlds; + if (cxlds && cxlds->type == CXL_DEVTYPE_CLASSMEM) rc = __cxl_memdev_ioctl(cxlmd, cmd, arg); up_read(&cxl_memdev_rwsem); @@ -659,7 +674,7 @@ static int cxl_memdev_release_file(struct inode *inode, struct file *file) * * See CXL-3.0 8.2.9.3.1 Get FW Info */ -static int cxl_mem_get_fw_info(struct cxl_dev_state *cxlds) +static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds) { struct cxl_mbox_get_fw_info info; struct cxl_mbox_cmd mbox_cmd; @@ -671,12 +686,12 @@ static int cxl_mem_get_fw_info(struct cxl_dev_state *cxlds) .payload_out = &info, }; - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); + rc = cxl_internal_send_cmd(mds, &mbox_cmd); if (rc < 0) return rc; - cxlds->fw.num_slots = info.num_slots; - cxlds->fw.cur_slot = FIELD_GET(CXL_FW_INFO_SLOT_INFO_CUR_MASK, + mds->fw.num_slots = info.num_slots; + mds->fw.cur_slot = FIELD_GET(CXL_FW_INFO_SLOT_INFO_CUR_MASK, info.slot_info); return 0; @@ -684,7 +699,7 @@ static int cxl_mem_get_fw_info(struct cxl_dev_state *cxlds) /** * cxl_mem_activate_fw - Activate Firmware - * @cxlds: The device data for the operation + * @mds: The device data for the operation * @slot: slot number to activate * * Activate firmware in a given slot for the device specified. 
@@ -693,12 +708,12 @@ static int cxl_mem_get_fw_info(struct cxl_dev_state *cxlds) * * See CXL-3.0 8.2.9.3.3 Activate FW */ -static int cxl_mem_activate_fw(struct cxl_dev_state *cxlds, int slot) +static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot) { struct cxl_mbox_activate_fw activate; struct cxl_mbox_cmd mbox_cmd; - if (slot == 0 || slot > cxlds->fw.num_slots) + if (slot == 0 || slot > mds->fw.num_slots) return -EINVAL; mbox_cmd = (struct cxl_mbox_cmd) { @@ -711,12 +726,12 @@ static int cxl_mem_activate_fw(struct cxl_dev_state *cxlds, int slot) activate.action = CXL_FW_ACTIVATE_OFFLINE; activate.slot = slot; - return cxl_internal_send_cmd(cxlds, &mbox_cmd); + return cxl_internal_send_cmd(mds, &mbox_cmd); } /** * cxl_mem_abort_fw_xfer - Abort an in-progress FW transfer - * @cxlds: The device data for the operation + * @mds: The device data for the operation * * Abort an in-progress firmware transfer for the device specified. * @@ -724,7 +739,7 @@ static int cxl_mem_activate_fw(struct cxl_dev_state *cxlds, int slot) * * See CXL-3.0 8.2.9.3.2 Transfer FW */ -static int cxl_mem_abort_fw_xfer(struct cxl_dev_state *cxlds) +static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds) { struct cxl_mbox_transfer_fw *transfer; struct cxl_mbox_cmd mbox_cmd; @@ -745,25 +760,26 @@ static int cxl_mem_abort_fw_xfer(struct cxl_dev_state *cxlds) transfer->action = CXL_FW_TRANSFER_ACTION_ABORT; - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); + rc = cxl_internal_send_cmd(mds, &mbox_cmd); kfree(transfer); return rc; } static void cxl_fw_cleanup(struct fw_upload *fwl) { - struct cxl_dev_state *cxlds = fwl->dd_handle; + struct cxl_memdev_state *mds = fwl->dd_handle; - cxlds->fw.next_slot = 0; + mds->fw.next_slot = 0; } static int cxl_fw_do_cancel(struct fw_upload *fwl) { - struct cxl_dev_state *cxlds = fwl->dd_handle; + struct cxl_memdev_state *mds = fwl->dd_handle; + struct cxl_dev_state *cxlds = &mds->cxlds; struct cxl_memdev *cxlmd = cxlds->cxlmd; int rc; - rc = 
cxl_mem_abort_fw_xfer(cxlds); + rc = cxl_mem_abort_fw_xfer(mds); if (rc < 0) dev_err(&cxlmd->dev, "Error aborting FW transfer: %d\n", rc); @@ -773,23 +789,23 @@ static int cxl_fw_do_cancel(struct fw_upload *fwl) static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data, u32 size) { - struct cxl_dev_state *cxlds = fwl->dd_handle; + struct cxl_memdev_state *mds = fwl->dd_handle; struct cxl_mbox_transfer_fw *transfer; if (!size) return FW_UPLOAD_ERR_INVALID_SIZE; - cxlds->fw.oneshot = struct_size(transfer, data, size) < - cxlds->payload_size; + mds->fw.oneshot = struct_size(transfer, data, size) < + mds->payload_size; - if (cxl_mem_get_fw_info(cxlds)) + if (cxl_mem_get_fw_info(mds)) return FW_UPLOAD_ERR_HW_ERROR; /* * So far no state has been changed, hence no other cleanup is * necessary. Simply return the cancelled status. */ - if (test_and_clear_bit(CXL_FW_CANCEL, cxlds->fw.state)) + if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state)) return FW_UPLOAD_ERR_CANCELED; return FW_UPLOAD_ERR_NONE; @@ -798,7 +814,8 @@ static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data, static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data, u32 offset, u32 size, u32 *written) { - struct cxl_dev_state *cxlds = fwl->dd_handle; + struct cxl_memdev_state *mds = fwl->dd_handle; + struct cxl_dev_state *cxlds = &mds->cxlds; struct cxl_memdev *cxlmd = cxlds->cxlmd; struct cxl_mbox_transfer_fw *transfer; struct cxl_mbox_cmd mbox_cmd; @@ -817,17 +834,17 @@ static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data, } /* - * Pick transfer size based on cxlds->payload_size - * @size must bw 128-byte aligned, ->payload_size is a power of 2 - * starting at 256 bytes, and sizeof(*transfer) is 128. - * These constraints imply that @cur_size will always be 128b aligned. 
+ * Pick transfer size based on mds->payload_size @size must bw 128-byte + * aligned, ->payload_size is a power of 2 starting at 256 bytes, and + * sizeof(*transfer) is 128. These constraints imply that @cur_size + * will always be 128b aligned. */ - cur_size = min_t(size_t, size, cxlds->payload_size - sizeof(*transfer)); + cur_size = min_t(size_t, size, mds->payload_size - sizeof(*transfer)); remaining = size - cur_size; size_in = struct_size(transfer, data, cur_size); - if (test_and_clear_bit(CXL_FW_CANCEL, cxlds->fw.state)) + if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state)) return cxl_fw_do_cancel(fwl); /* @@ -835,7 +852,7 @@ static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data, * cur_slot is the 0-indexed next_slot (i.e. 'cur_slot - 1 + 1') * Check for rollover using modulo, and 1-index it by adding 1 */ - cxlds->fw.next_slot = (cxlds->fw.cur_slot % cxlds->fw.num_slots) + 1; + mds->fw.next_slot = (mds->fw.cur_slot % mds->fw.num_slots) + 1; /* Do the transfer via mailbox cmd */ transfer = kzalloc(size_in, GFP_KERNEL); @@ -844,15 +861,15 @@ static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data, transfer->offset = cpu_to_le32(offset / CXL_FW_TRANSFER_ALIGNMENT); memcpy(transfer->data, data + offset, cur_size); - if (cxlds->fw.oneshot) { + if (mds->fw.oneshot) { transfer->action = CXL_FW_TRANSFER_ACTION_FULL; - transfer->slot = cxlds->fw.next_slot; + transfer->slot = mds->fw.next_slot; } else { if (offset == 0) { transfer->action = CXL_FW_TRANSFER_ACTION_INITIATE; } else if (remaining == 0) { transfer->action = CXL_FW_TRANSFER_ACTION_END; - transfer->slot = cxlds->fw.next_slot; + transfer->slot = mds->fw.next_slot; } else { transfer->action = CXL_FW_TRANSFER_ACTION_CONTINUE; } @@ -866,7 +883,7 @@ static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data, .poll_count = 30, }; - rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); + rc = cxl_internal_send_cmd(mds, &mbox_cmd); if (rc < 0) { rc = 
FW_UPLOAD_ERR_RW_ERROR; goto out_free; @@ -875,10 +892,10 @@ static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data, *written = cur_size; /* Activate FW if oneshot or if the last slice was written */ - if (cxlds->fw.oneshot || remaining == 0) { + if (mds->fw.oneshot || remaining == 0) { dev_dbg(&cxlmd->dev, "Activating firmware slot: %d\n", - cxlds->fw.next_slot); - rc = cxl_mem_activate_fw(cxlds, cxlds->fw.next_slot); + mds->fw.next_slot); + rc = cxl_mem_activate_fw(mds, mds->fw.next_slot); if (rc < 0) { dev_err(&cxlmd->dev, "Error activating firmware: %d\n", rc); @@ -896,7 +913,7 @@ out_free: static enum fw_upload_err cxl_fw_poll_complete(struct fw_upload *fwl) { - struct cxl_dev_state *cxlds = fwl->dd_handle; + struct cxl_memdev_state *mds = fwl->dd_handle; /* * cxl_internal_send_cmd() handles background operations synchronously. @@ -904,7 +921,7 @@ static enum fw_upload_err cxl_fw_poll_complete(struct fw_upload *fwl) * reported and handled during the ->write() call(s). * Just check if a cancel request was received, and return success. 
*/ - if (test_and_clear_bit(CXL_FW_CANCEL, cxlds->fw.state)) + if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state)) return cxl_fw_do_cancel(fwl); return FW_UPLOAD_ERR_NONE; @@ -912,9 +929,9 @@ static enum fw_upload_err cxl_fw_poll_complete(struct fw_upload *fwl) static void cxl_fw_cancel(struct fw_upload *fwl) { - struct cxl_dev_state *cxlds = fwl->dd_handle; + struct cxl_memdev_state *mds = fwl->dd_handle; - set_bit(CXL_FW_CANCEL, cxlds->fw.state); + set_bit(CXL_FW_CANCEL, mds->fw.state); } static const struct fw_upload_ops cxl_memdev_fw_ops = { @@ -930,17 +947,18 @@ static void devm_cxl_remove_fw_upload(void *fwl) firmware_upload_unregister(fwl); } -int cxl_memdev_setup_fw_upload(struct cxl_dev_state *cxlds) +int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds) { + struct cxl_dev_state *cxlds = &mds->cxlds; struct device *dev = &cxlds->cxlmd->dev; struct fw_upload *fwl; int rc; - if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, cxlds->enabled_cmds)) + if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds)) return 0; fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev), - &cxl_memdev_fw_ops, cxlds); + &cxl_memdev_fw_ops, mds); if (IS_ERR(fwl)) return dev_err_probe(dev, PTR_ERR(fwl), "Failed to register firmware loader\n"); @@ -967,14 +985,15 @@ static const struct file_operations cxl_memdev_fops = { static void put_sanitize(void *data) { - struct cxl_dev_state *cxlds = data; + struct cxl_memdev_state *mds = data; - sysfs_put(cxlds->security.sanitize_node); + sysfs_put(mds->security.sanitize_node); } static int cxl_memdev_security_init(struct cxl_memdev *cxlmd) { struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); struct device *dev = &cxlmd->dev; struct kernfs_node *sec; @@ -983,14 +1002,14 @@ static int cxl_memdev_security_init(struct cxl_memdev *cxlmd) dev_err(dev, "sysfs_get_dirent 'security' failed\n"); return -ENODEV; } - cxlds->security.sanitize_node = sysfs_get_dirent(sec, 
"state"); + mds->security.sanitize_node = sysfs_get_dirent(sec, "state"); sysfs_put(sec); - if (!cxlds->security.sanitize_node) { + if (!mds->security.sanitize_node) { dev_err(dev, "sysfs_get_dirent 'state' failed\n"); return -ENODEV; } - return devm_add_action_or_reset(cxlds->dev, put_sanitize, cxlds); + return devm_add_action_or_reset(cxlds->dev, put_sanitize, mds); } struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds) |