Diffstat (limited to 'drivers/cxl/core/memdev.c')
 drivers/cxl/core/memdev.c | 503
 1 file changed, 484 insertions(+), 19 deletions(-)
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 057a43267290..f99e7ec3cc40 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright(c) 2020 Intel Corporation. */
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/firmware.h>
 #include <linux/device.h>
 #include <linux/slab.h>
 #include <linux/idr.h>
@@ -39,8 +41,11 @@ static ssize_t firmware_version_show(struct device *dev,
 {
 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
 
-	return sysfs_emit(buf, "%.16s\n", cxlds->firmware_version);
+	if (!mds)
+		return sysfs_emit(buf, "\n");
+	return sysfs_emit(buf, "%.16s\n", mds->firmware_version);
 }
 static DEVICE_ATTR_RO(firmware_version);
 
@@ -49,8 +54,11 @@ static ssize_t payload_max_show(struct device *dev,
 {
 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
 
-	return sysfs_emit(buf, "%zu\n", cxlds->payload_size);
+	if (!mds)
+		return sysfs_emit(buf, "\n");
+	return sysfs_emit(buf, "%zu\n", mds->payload_size);
 }
 static DEVICE_ATTR_RO(payload_max);
 
@@ -59,8 +67,11 @@ static ssize_t label_storage_size_show(struct device *dev,
 {
 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
 
-	return sysfs_emit(buf, "%zu\n", cxlds->lsa_size);
+	if (!mds)
+		return sysfs_emit(buf, "\n");
+	return sysfs_emit(buf, "%zu\n", mds->lsa_size);
 }
 static DEVICE_ATTR_RO(label_storage_size);
 
@@ -107,6 +118,89 @@ static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RO(numa_node);
 
+static ssize_t security_state_show(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+	u64 reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
+	u32 pct = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg);
+	u16 cmd = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
+	unsigned long state = mds->security.state;
+
+	if (cmd == CXL_MBOX_OP_SANITIZE && pct != 100)
+		return sysfs_emit(buf, "sanitize\n");
+
+	if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
+		return sysfs_emit(buf, "disabled\n");
+	if (state & CXL_PMEM_SEC_STATE_FROZEN ||
+	    state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT ||
+	    state & CXL_PMEM_SEC_STATE_USER_PLIMIT)
+		return sysfs_emit(buf, "frozen\n");
+	if (state & CXL_PMEM_SEC_STATE_LOCKED)
+		return sysfs_emit(buf, "locked\n");
+	else
+		return sysfs_emit(buf, "unlocked\n");
+}
+static struct device_attribute dev_attr_security_state =
+	__ATTR(state, 0444, security_state_show, NULL);
+
+static ssize_t security_sanitize_store(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t len)
+{
+	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+	struct cxl_port *port = cxlmd->endpoint;
+	bool sanitize;
+	ssize_t rc;
+
+	if (kstrtobool(buf, &sanitize) || !sanitize)
+		return -EINVAL;
+
+	if (!port || !is_cxl_endpoint(port))
+		return -EINVAL;
+
+	/* ensure no regions are mapped to this memdev */
+	if (port->commit_end != -1)
+		return -EBUSY;
+
+	rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SANITIZE);
+
+	return rc ? rc : len;
+}
+static struct device_attribute dev_attr_security_sanitize =
+	__ATTR(sanitize, 0200, NULL, security_sanitize_store);
+
+static ssize_t security_erase_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t len)
+{
+	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+	struct cxl_port *port = cxlmd->endpoint;
+	ssize_t rc;
+	bool erase;
+
+	if (kstrtobool(buf, &erase) || !erase)
+		return -EINVAL;
+
+	if (!port || !is_cxl_endpoint(port))
+		return -EINVAL;
+
+	/* ensure no regions are mapped to this memdev */
+	if (port->commit_end != -1)
+		return -EBUSY;
+
+	rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SECURE_ERASE);
+
+	return rc ? rc : len;
+}
+static struct device_attribute dev_attr_security_erase =
+	__ATTR(erase, 0200, NULL, security_erase_store);
+
 static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
 {
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
@@ -140,7 +234,7 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
 	struct cxl_port *port;
 	int rc;
 
-	port = dev_get_drvdata(&cxlmd->dev);
+	port = cxlmd->endpoint;
 	if (!port || !is_cxl_endpoint(port))
 		return -EINVAL;
 
@@ -198,7 +292,7 @@ static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa)
 	ctx = (struct cxl_dpa_to_region_context) {
 		.dpa = dpa,
 	};
-	port = dev_get_drvdata(&cxlmd->dev);
+	port = cxlmd->endpoint;
 	if (port && is_cxl_endpoint(port) && port->commit_end != -1)
 		device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
 
@@ -231,7 +325,7 @@ static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
 
 int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
 {
-	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 	struct cxl_mbox_inject_poison inject;
 	struct cxl_poison_record record;
 	struct cxl_mbox_cmd mbox_cmd;
@@ -255,13 +349,13 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
 		.size_in = sizeof(inject),
 		.payload_in = &inject,
 	};
-	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 	if (rc)
 		goto out;
 
 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
 	if (cxlr)
-		dev_warn_once(cxlds->dev,
+		dev_warn_once(mds->cxlds.dev,
 			      "poison inject dpa:%#llx region: %s\n", dpa,
 			      dev_name(&cxlr->dev));
 
@@ -279,7 +373,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, CXL);
 
 int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
 {
-	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 	struct cxl_mbox_clear_poison clear;
 	struct cxl_poison_record record;
 	struct cxl_mbox_cmd mbox_cmd;
@@ -312,14 +406,15 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
 		.payload_in = &clear,
 	};
 
-	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 	if (rc)
 		goto out;
 
 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
 	if (cxlr)
-		dev_warn_once(cxlds->dev, "poison clear dpa:%#llx region: %s\n",
-			      dpa, dev_name(&cxlr->dev));
+		dev_warn_once(mds->cxlds.dev,
+			      "poison clear dpa:%#llx region: %s\n", dpa,
+			      dev_name(&cxlr->dev));
 
 	record = (struct cxl_poison_record) {
 		.address = cpu_to_le64(dpa),
@@ -352,6 +447,13 @@ static struct attribute *cxl_memdev_ram_attributes[] = {
 	NULL,
 };
 
+static struct attribute *cxl_memdev_security_attributes[] = {
+	&dev_attr_security_state.attr,
+	&dev_attr_security_sanitize.attr,
+	&dev_attr_security_erase.attr,
+	NULL,
+};
+
 static umode_t cxl_memdev_visible(struct kobject *kobj, struct attribute *a,
 				  int n)
 {
@@ -375,10 +477,16 @@ static struct attribute_group cxl_memdev_pmem_attribute_group = {
 	.attrs = cxl_memdev_pmem_attributes,
 };
 
+static struct attribute_group cxl_memdev_security_attribute_group = {
+	.name = "security",
+	.attrs = cxl_memdev_security_attributes,
+};
+
 static const struct attribute_group *cxl_memdev_attribute_groups[] = {
 	&cxl_memdev_attribute_group,
 	&cxl_memdev_ram_attribute_group,
 	&cxl_memdev_pmem_attribute_group,
+	&cxl_memdev_security_attribute_group,
 	NULL,
 };
 
@@ -397,17 +505,18 @@ EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, CXL);
 
 /**
  * set_exclusive_cxl_commands() - atomically disable user cxl commands
- * @cxlds: The device state to operate on
+ * @mds: The device state to operate on
  * @cmds: bitmap of commands to mark exclusive
  *
 * Grab the cxl_memdev_rwsem in write mode to flush in-flight
 * invocations of the ioctl path and then disable future execution of
 * commands with the command ids set in @cmds.
 */
-void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
+void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
+				unsigned long *cmds)
 {
 	down_write(&cxl_memdev_rwsem);
-	bitmap_or(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
+	bitmap_or(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
 		  CXL_MEM_COMMAND_ID_MAX);
 	up_write(&cxl_memdev_rwsem);
 }
@@ -415,23 +524,34 @@ EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, CXL);
 
 /**
  * clear_exclusive_cxl_commands() - atomically enable user cxl commands
- * @cxlds: The device state to modify
+ * @mds: The device state to modify
  * @cmds: bitmap of commands to mark available for userspace
  */
-void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
+void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
+				  unsigned long *cmds)
 {
 	down_write(&cxl_memdev_rwsem);
-	bitmap_andnot(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
+	bitmap_andnot(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
 		      CXL_MEM_COMMAND_ID_MAX);
 	up_write(&cxl_memdev_rwsem);
 }
 EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
 
+static void cxl_memdev_security_shutdown(struct device *dev)
+{
+	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+	if (mds->security.poll)
+		cancel_delayed_work_sync(&mds->security.poll_dwork);
+}
+
 static void cxl_memdev_shutdown(struct device *dev)
 {
 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 
 	down_write(&cxl_memdev_rwsem);
+	cxl_memdev_security_shutdown(dev);
 	cxlmd->cxlds = NULL;
 	up_write(&cxl_memdev_rwsem);
 }
@@ -511,10 +631,12 @@ static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
 			     unsigned long arg)
 {
 	struct cxl_memdev *cxlmd = file->private_data;
+	struct cxl_dev_state *cxlds;
 	int rc = -ENXIO;
 
 	down_read(&cxl_memdev_rwsem);
-	if (cxlmd->cxlds)
+	cxlds = cxlmd->cxlds;
+	if (cxlds && cxlds->type == CXL_DEVTYPE_CLASSMEM)
 		rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
 	up_read(&cxl_memdev_rwsem);
 
@@ -542,6 +664,316 @@ static int cxl_memdev_release_file(struct inode *inode, struct file *file)
 	return 0;
 }
 
+/**
+ * cxl_mem_get_fw_info - Get Firmware info
+ * @mds: The device data for the operation
+ *
+ * Retrieve firmware info for the device specified.
+ *
+ * Return: 0 if no error; or the result of the mailbox command.
+ *
+ * See CXL-3.0 8.2.9.3.1 Get FW Info
+ */
+static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds)
+{
+	struct cxl_mbox_get_fw_info info;
+	struct cxl_mbox_cmd mbox_cmd;
+	int rc;
+
+	mbox_cmd = (struct cxl_mbox_cmd) {
+		.opcode = CXL_MBOX_OP_GET_FW_INFO,
+		.size_out = sizeof(info),
+		.payload_out = &info,
+	};
+
+	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+	if (rc < 0)
+		return rc;
+
+	mds->fw.num_slots = info.num_slots;
+	mds->fw.cur_slot = FIELD_GET(CXL_FW_INFO_SLOT_INFO_CUR_MASK,
+				     info.slot_info);
+
+	return 0;
+}
+
+/**
+ * cxl_mem_activate_fw - Activate Firmware
+ * @mds: The device data for the operation
+ * @slot: slot number to activate
+ *
+ * Activate firmware in a given slot for the device specified.
+ *
+ * Return: 0 if no error; or the result of the mailbox command.
+ *
+ * See CXL-3.0 8.2.9.3.3 Activate FW
+ */
+static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot)
+{
+	struct cxl_mbox_activate_fw activate;
+	struct cxl_mbox_cmd mbox_cmd;
+
+	if (slot == 0 || slot > mds->fw.num_slots)
+		return -EINVAL;
+
+	mbox_cmd = (struct cxl_mbox_cmd) {
+		.opcode = CXL_MBOX_OP_ACTIVATE_FW,
+		.size_in = sizeof(activate),
+		.payload_in = &activate,
+	};
+
+	/* Only offline activation supported for now */
+	activate.action = CXL_FW_ACTIVATE_OFFLINE;
+	activate.slot = slot;
+
+	return cxl_internal_send_cmd(mds, &mbox_cmd);
+}
+
+/**
+ * cxl_mem_abort_fw_xfer - Abort an in-progress FW transfer
+ * @mds: The device data for the operation
+ *
+ * Abort an in-progress firmware transfer for the device specified.
+ *
+ * Return: 0 if no error; or the result of the mailbox command.
+ *
+ * See CXL-3.0 8.2.9.3.2 Transfer FW
+ */
+static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds)
+{
+	struct cxl_mbox_transfer_fw *transfer;
+	struct cxl_mbox_cmd mbox_cmd;
+	int rc;
+
+	transfer = kzalloc(struct_size(transfer, data, 0), GFP_KERNEL);
+	if (!transfer)
+		return -ENOMEM;
+
+	/* Set a 1s poll interval and a total wait time of 30s */
+	mbox_cmd = (struct cxl_mbox_cmd) {
+		.opcode = CXL_MBOX_OP_TRANSFER_FW,
+		.size_in = sizeof(*transfer),
+		.payload_in = transfer,
+		.poll_interval_ms = 1000,
+		.poll_count = 30,
+	};
+
+	transfer->action = CXL_FW_TRANSFER_ACTION_ABORT;
+
+	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+	kfree(transfer);
+	return rc;
+}
+
+static void cxl_fw_cleanup(struct fw_upload *fwl)
+{
+	struct cxl_memdev_state *mds = fwl->dd_handle;
+
+	mds->fw.next_slot = 0;
+}
+
+static int cxl_fw_do_cancel(struct fw_upload *fwl)
+{
+	struct cxl_memdev_state *mds = fwl->dd_handle;
+	struct cxl_dev_state *cxlds = &mds->cxlds;
+	struct cxl_memdev *cxlmd = cxlds->cxlmd;
+	int rc;
+
+	rc = cxl_mem_abort_fw_xfer(mds);
+	if (rc < 0)
+		dev_err(&cxlmd->dev, "Error aborting FW transfer: %d\n", rc);
+
+	return FW_UPLOAD_ERR_CANCELED;
+}
+
+static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data,
+					 u32 size)
+{
+	struct cxl_memdev_state *mds = fwl->dd_handle;
+	struct cxl_mbox_transfer_fw *transfer;
+
+	if (!size)
+		return FW_UPLOAD_ERR_INVALID_SIZE;
+
+	mds->fw.oneshot = struct_size(transfer, data, size) <
+			  mds->payload_size;
+
+	if (cxl_mem_get_fw_info(mds))
+		return FW_UPLOAD_ERR_HW_ERROR;
+
+	/*
+	 * So far no state has been changed, hence no other cleanup is
+	 * necessary. Simply return the cancelled status.
+	 */
+	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
+		return FW_UPLOAD_ERR_CANCELED;
+
+	return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data,
+				       u32 offset, u32 size, u32 *written)
+{
+	struct cxl_memdev_state *mds = fwl->dd_handle;
+	struct cxl_dev_state *cxlds = &mds->cxlds;
+	struct cxl_memdev *cxlmd = cxlds->cxlmd;
+	struct cxl_mbox_transfer_fw *transfer;
+	struct cxl_mbox_cmd mbox_cmd;
+	u32 cur_size, remaining;
+	size_t size_in;
+	int rc;
+
+	*written = 0;
+
+	/* Offset has to be aligned to 128B (CXL-3.0 8.2.9.3.2 Table 8-57) */
+	if (!IS_ALIGNED(offset, CXL_FW_TRANSFER_ALIGNMENT)) {
+		dev_err(&cxlmd->dev,
+			"misaligned offset for FW transfer slice (%u)\n",
+			offset);
+		return FW_UPLOAD_ERR_RW_ERROR;
+	}
+
+	/*
+	 * Pick transfer size based on mds->payload_size. @size must be
+	 * 128-byte aligned, ->payload_size is a power of 2 starting at 256
+	 * bytes, and sizeof(*transfer) is 128. These constraints imply that
+	 * @cur_size will always be 128b aligned.
+	 */
+	cur_size = min_t(size_t, size, mds->payload_size - sizeof(*transfer));
+
+	remaining = size - cur_size;
+	size_in = struct_size(transfer, data, cur_size);
+
+	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
+		return cxl_fw_do_cancel(fwl);
+
+	/*
+	 * Slot numbers are 1-indexed
+	 * cur_slot is the 0-indexed next_slot (i.e. 'cur_slot - 1 + 1')
+	 * Check for rollover using modulo, and 1-index it by adding 1
+	 */
+	mds->fw.next_slot = (mds->fw.cur_slot % mds->fw.num_slots) + 1;
+
+	/* Do the transfer via mailbox cmd */
+	transfer = kzalloc(size_in, GFP_KERNEL);
+	if (!transfer)
+		return FW_UPLOAD_ERR_RW_ERROR;
+
+	transfer->offset = cpu_to_le32(offset / CXL_FW_TRANSFER_ALIGNMENT);
+	memcpy(transfer->data, data + offset, cur_size);
+	if (mds->fw.oneshot) {
+		transfer->action = CXL_FW_TRANSFER_ACTION_FULL;
+		transfer->slot = mds->fw.next_slot;
+	} else {
+		if (offset == 0) {
+			transfer->action = CXL_FW_TRANSFER_ACTION_INITIATE;
+		} else if (remaining == 0) {
+			transfer->action = CXL_FW_TRANSFER_ACTION_END;
+			transfer->slot = mds->fw.next_slot;
+		} else {
+			transfer->action = CXL_FW_TRANSFER_ACTION_CONTINUE;
+		}
+	}
+
+	mbox_cmd = (struct cxl_mbox_cmd) {
+		.opcode = CXL_MBOX_OP_TRANSFER_FW,
+		.size_in = size_in,
+		.payload_in = transfer,
+		.poll_interval_ms = 1000,
+		.poll_count = 30,
+	};
+
+	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+	if (rc < 0) {
+		rc = FW_UPLOAD_ERR_RW_ERROR;
+		goto out_free;
+	}
+
+	*written = cur_size;
+
+	/* Activate FW if oneshot or if the last slice was written */
+	if (mds->fw.oneshot || remaining == 0) {
+		dev_dbg(&cxlmd->dev, "Activating firmware slot: %d\n",
+			mds->fw.next_slot);
+		rc = cxl_mem_activate_fw(mds, mds->fw.next_slot);
+		if (rc < 0) {
+			dev_err(&cxlmd->dev, "Error activating firmware: %d\n",
+				rc);
+			rc = FW_UPLOAD_ERR_HW_ERROR;
+			goto out_free;
+		}
+	}
+
+	rc = FW_UPLOAD_ERR_NONE;
+
+out_free:
+	kfree(transfer);
+	return rc;
+}
+
+static enum fw_upload_err cxl_fw_poll_complete(struct fw_upload *fwl)
+{
+	struct cxl_memdev_state *mds = fwl->dd_handle;
+
+	/*
+	 * cxl_internal_send_cmd() handles background operations synchronously.
+	 * No need to wait for completions here - any errors would've been
+	 * reported and handled during the ->write() call(s).
+	 * Just check if a cancel request was received, and return success.
+	 */
+	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
+		return cxl_fw_do_cancel(fwl);
+
+	return FW_UPLOAD_ERR_NONE;
+}
+
+static void cxl_fw_cancel(struct fw_upload *fwl)
+{
+	struct cxl_memdev_state *mds = fwl->dd_handle;
+
+	set_bit(CXL_FW_CANCEL, mds->fw.state);
+}
+
+static const struct fw_upload_ops cxl_memdev_fw_ops = {
+	.prepare = cxl_fw_prepare,
+	.write = cxl_fw_write,
+	.poll_complete = cxl_fw_poll_complete,
+	.cancel = cxl_fw_cancel,
+	.cleanup = cxl_fw_cleanup,
+};
+
+static void devm_cxl_remove_fw_upload(void *fwl)
+{
+	firmware_upload_unregister(fwl);
+}
+
+int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds)
+{
+	struct cxl_dev_state *cxlds = &mds->cxlds;
+	struct device *dev = &cxlds->cxlmd->dev;
+	struct fw_upload *fwl;
+	int rc;
+
+	if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds))
+		return 0;
+
+	fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
+				       &cxl_memdev_fw_ops, mds);
+	if (IS_ERR(fwl))
+		return dev_err_probe(dev, PTR_ERR(fwl),
+				     "Failed to register firmware loader\n");
+
+	rc = devm_add_action_or_reset(cxlds->dev, devm_cxl_remove_fw_upload,
+				      fwl);
+	if (rc)
+		dev_err(dev,
+			"Failed to add firmware loader remove action: %d\n",
+			rc);
+
+	return rc;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_memdev_setup_fw_upload, CXL);
+
 static const struct file_operations cxl_memdev_fops = {
 	.owner = THIS_MODULE,
 	.unlocked_ioctl = cxl_memdev_ioctl,
@@ -551,6 +983,35 @@ static const struct file_operations cxl_memdev_fops = {
 	.llseek = noop_llseek,
 };
 
+static void put_sanitize(void *data)
+{
+	struct cxl_memdev_state *mds = data;
+
+	sysfs_put(mds->security.sanitize_node);
+}
+
+static int cxl_memdev_security_init(struct cxl_memdev *cxlmd)
+{
+	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+	struct device *dev = &cxlmd->dev;
+	struct kernfs_node *sec;
+
+	sec = sysfs_get_dirent(dev->kobj.sd, "security");
+	if (!sec) {
+		dev_err(dev, "sysfs_get_dirent 'security' failed\n");
+		return -ENODEV;
+	}
+	mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
+	sysfs_put(sec);
+	if (!mds->security.sanitize_node) {
+		dev_err(dev, "sysfs_get_dirent 'state' failed\n");
+		return -ENODEV;
+	}
+
+	return devm_add_action_or_reset(cxlds->dev, put_sanitize, mds);
+}
+
 struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
 {
 	struct cxl_memdev *cxlmd;
@@ -579,6 +1040,10 @@ struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
 	if (rc)
 		goto err;
 
+	rc = cxl_memdev_security_init(cxlmd);
+	if (rc)
+		goto err;
+
 	rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
 	if (rc)
 		return ERR_PTR(rc);
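
For context, a minimal userspace sketch of how the new "security" sysfs group added above might be exercised. This is not part of the patch; the /sys/bus/cxl/devices/mem0 path is an assumption for illustration, and a real tool would pick the memdev of interest.

/*
 * Hedged userspace sketch: read the memdev security state and request a
 * sanitize via the attributes added by this patch.  Path is assumed.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define SEC_DIR "/sys/bus/cxl/devices/mem0/security"

int main(void)
{
	char state[32] = { 0 };
	int fd;

	/* Expected values: disabled, frozen, locked, unlocked, or sanitize */
	fd = open(SEC_DIR "/state", O_RDONLY);
	if (fd < 0 || read(fd, state, sizeof(state) - 1) < 0) {
		perror("read " SEC_DIR "/state");
		return 1;
	}
	close(fd);
	printf("security state: %s", state);

	/*
	 * Kick off a sanitize.  Per security_sanitize_store() above, this
	 * fails with -EBUSY while any region is still committed on the memdev.
	 */
	fd = open(SEC_DIR "/sanitize", O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) != 1) {
		perror("write " SEC_DIR "/sanitize");
		return 1;
	}
	close(fd);

	return 0;
}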
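Likewise, a hedged sketch of driving the uploader that cxl_memdev_setup_fw_upload() registers. It assumes the generic firmware-upload class interface (loading/data files under /sys/class/firmware/<name>/); the "mem0" device name and the image filename are placeholders, not taken from the patch.

/*
 * Hedged userspace sketch: push a firmware image through the firmware
 * upload interface registered for the memdev.  Paths are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define FW_DIR "/sys/class/firmware/mem0"

/* Write a short string to a sysfs file, returning 0 on success */
static int write_str(const char *path, const char *s, size_t n)
{
	int fd = open(path, O_WRONLY);
	ssize_t rc;

	if (fd < 0)
		return -1;
	rc = write(fd, s, n);
	close(fd);
	return rc == (ssize_t)n ? 0 : -1;
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int in, data;

	if (write_str(FW_DIR "/loading", "1", 1))	/* begin the upload */
		return 1;

	in = open("cxl_fw.bin", O_RDONLY);		/* placeholder image */
	data = open(FW_DIR "/data", O_WRONLY);
	if (in < 0 || data < 0)
		return 1;
	while ((n = read(in, buf, sizeof(buf))) > 0)
		if (write(data, buf, n) != n)
			return 1;
	close(in);
	close(data);

	/* End the upload; the driver then transfers and activates the slot */
	return write_str(FW_DIR "/loading", "0", 1);
}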