Diffstat (limited to 'drivers/cxl/core/mbox.c')
-rw-r--r--  drivers/cxl/core/mbox.c  186
1 file changed, 92 insertions(+), 94 deletions(-)
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 576796a5d9f3..be61a0d8016b 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -128,8 +128,8 @@ static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
}
/**
- * cxl_mem_mbox_send_cmd() - Send a mailbox command to a memory device.
- * @cxlm: The CXL memory device to communicate with.
+ * cxl_mbox_send_cmd() - Send a mailbox command to a device.
+ * @cxlds: The device data for the operation
* @opcode: Opcode for the mailbox command.
* @in: The input payload for the mailbox command.
* @in_size: The length of the input payload
@@ -148,11 +148,9 @@ static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
* Mailbox commands may execute successfully yet the device itself reported an
* error. While this distinction can be useful for commands from userspace, the
* kernel will only be able to use results when both are successful.
- *
- * See __cxl_mem_mbox_send_cmd()
*/
-int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in,
- size_t in_size, void *out, size_t out_size)
+int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
+ size_t in_size, void *out, size_t out_size)
{
const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
struct cxl_mbox_cmd mbox_cmd = {
@@ -164,10 +162,10 @@ int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in,
};
int rc;
- if (out_size > cxlm->payload_size)
+ if (out_size > cxlds->payload_size)
return -E2BIG;
- rc = cxlm->mbox_send(cxlm, &mbox_cmd);
+ rc = cxlds->mbox_send(cxlds, &mbox_cmd);
if (rc)
return rc;
@@ -184,7 +182,7 @@ int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in,
return 0;
}
-EXPORT_SYMBOL_GPL(cxl_mem_mbox_send_cmd);
+EXPORT_SYMBOL_NS_GPL(cxl_mbox_send_cmd, CXL);
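For context, a kernel-internal caller of the renamed helper now passes the cxl_dev_state; a minimal sketch modeled on the IDENTIFY call further down in this patch (the wrapper function name is hypothetical):

/* Sketch: send IDENTIFY through the renamed mailbox helper. */
static int example_identify(struct cxl_dev_state *cxlds)
{
	struct cxl_mbox_identify id;
	int rc;

	/* Output larger than cxlds->payload_size is rejected with -E2BIG. */
	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_IDENTIFY, NULL, 0,
			       &id, sizeof(id));
	if (rc < 0)
		return rc;

	dev_dbg(cxlds->dev, "lsa_size: %u\n", le32_to_cpu(id.lsa_size));
	return 0;
}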
static bool cxl_mem_raw_command_allowed(u16 opcode)
{
@@ -211,7 +209,7 @@ static bool cxl_mem_raw_command_allowed(u16 opcode)
/**
* cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
- * @cxlm: &struct cxl_mem device whose mailbox will be used.
+ * @cxlds: The device data for the operation
* @send_cmd: &struct cxl_send_command copied in from userspace.
* @out_cmd: Sanitized and populated &struct cxl_mem_command.
*
@@ -228,7 +226,7 @@ static bool cxl_mem_raw_command_allowed(u16 opcode)
*
* See handle_mailbox_cmd_from_user()
*/
-static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
+static int cxl_validate_cmd_from_user(struct cxl_dev_state *cxlds,
const struct cxl_send_command *send_cmd,
struct cxl_mem_command *out_cmd)
{
@@ -243,7 +241,7 @@ static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
* supports, but output can be arbitrarily large (simply write out as
* much data as the hardware provides).
*/
- if (send_cmd->in.size > cxlm->payload_size)
+ if (send_cmd->in.size > cxlds->payload_size)
return -EINVAL;
/*
@@ -269,7 +267,7 @@ static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
* gets passed along without further checking, so it must be
* validated here.
*/
- if (send_cmd->out.size > cxlm->payload_size)
+ if (send_cmd->out.size > cxlds->payload_size)
return -EINVAL;
if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
@@ -294,11 +292,11 @@ static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
info = &c->info;
/* Check that the command is enabled for hardware */
- if (!test_bit(info->id, cxlm->enabled_cmds))
+ if (!test_bit(info->id, cxlds->enabled_cmds))
return -ENOTTY;
/* Check that the command is not claimed for exclusive kernel use */
- if (test_bit(info->id, cxlm->exclusive_cmds))
+ if (test_bit(info->id, cxlds->exclusive_cmds))
return -EBUSY;
/* Check the input buffer is the expected size */
@@ -356,7 +354,7 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
/**
* handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
- * @cxlm: The CXL memory device to communicate with.
+ * @cxlds: The device data for the operation
* @cmd: The validated command.
* @in_payload: Pointer to userspace's input payload.
* @out_payload: Pointer to userspace's output payload.
@@ -379,12 +377,12 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
*
* See cxl_send_cmd().
*/
-static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm,
+static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds,
const struct cxl_mem_command *cmd,
u64 in_payload, u64 out_payload,
s32 *size_out, u32 *retval)
{
- struct device *dev = cxlm->dev;
+ struct device *dev = cxlds->dev;
struct cxl_mbox_cmd mbox_cmd = {
.opcode = cmd->opcode,
.size_in = cmd->info.size_in,
@@ -417,7 +415,7 @@ static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm,
dev_WARN_ONCE(dev, cmd->info.id == CXL_MEM_COMMAND_ID_RAW,
"raw command path used\n");
- rc = cxlm->mbox_send(cxlm, &mbox_cmd);
+ rc = cxlds->mbox_send(cxlds, &mbox_cmd);
if (rc)
goto out;
@@ -447,7 +445,7 @@ out:
int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
{
- struct cxl_mem *cxlm = cxlmd->cxlm;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *dev = &cxlmd->dev;
struct cxl_send_command send;
struct cxl_mem_command c;
@@ -458,15 +456,15 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
if (copy_from_user(&send, s, sizeof(send)))
return -EFAULT;
- rc = cxl_validate_cmd_from_user(cxlmd->cxlm, &send, &c);
+ rc = cxl_validate_cmd_from_user(cxlmd->cxlds, &send, &c);
if (rc)
return rc;
/* Prepare to handle a full payload for variable sized output */
if (c.info.size_out < 0)
- c.info.size_out = cxlm->payload_size;
+ c.info.size_out = cxlds->payload_size;
- rc = handle_mailbox_cmd_from_user(cxlm, &c, send.in.payload,
+ rc = handle_mailbox_cmd_from_user(cxlds, &c, send.in.payload,
send.out.payload, &send.out.size,
&send.retval);
if (rc)
@@ -478,13 +476,13 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
return 0;
}
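The same validate/handle pair backs the userspace interface; a minimal, hedged sketch of driving it through the CXL_MEM_SEND_COMMAND ioctl (the output buffer size is illustrative, and the driver's per-command size checks still apply):

/* Userspace sketch: issue IDENTIFY against a memdev character device. */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/cxl_mem.h>

static int send_identify(int fd)
{
	unsigned char out[4096];		/* illustrative size */
	struct cxl_send_command cmd = {
		.id = CXL_MEM_COMMAND_ID_IDENTIFY,
		.out.size = sizeof(out),
		.out.payload = (uint64_t)(uintptr_t)out,
	};

	if (ioctl(fd, CXL_MEM_SEND_COMMAND, &cmd) < 0)
		return -1;

	/* cmd.retval is the device's return code, cmd.out.size the bytes written. */
	return cmd.retval;
}

Here fd would come from opening the memdev character device, e.g. open("/dev/cxl/mem0", O_RDWR).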
-static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
+static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 size, u8 *out)
{
u32 remaining = size;
u32 offset = 0;
while (remaining) {
- u32 xfer_size = min_t(u32, remaining, cxlm->payload_size);
+ u32 xfer_size = min_t(u32, remaining, cxlds->payload_size);
struct cxl_mbox_get_log log = {
.uuid = *uuid,
.offset = cpu_to_le32(offset),
@@ -492,8 +490,8 @@ static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
};
int rc;
- rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LOG, &log,
- sizeof(log), out, xfer_size);
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LOG, &log, sizeof(log),
+ out, xfer_size);
if (rc < 0)
return rc;
@@ -507,14 +505,14 @@ static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
/**
* cxl_walk_cel() - Walk through the Command Effects Log.
- * @cxlm: Device.
+ * @cxlds: The device data for the operation
* @size: Length of the Command Effects Log.
* @cel: CEL
*
* Iterate over each entry in the CEL and determine if the driver supports the
* command. If so, the command is enabled for the device and can be used later.
*/
-static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel)
+static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel)
{
struct cxl_cel_entry *cel_entry;
const int cel_entries = size / sizeof(*cel_entry);
@@ -527,26 +525,26 @@ static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel)
struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
if (!cmd) {
- dev_dbg(cxlm->dev,
+ dev_dbg(cxlds->dev,
"Opcode 0x%04x unsupported by driver", opcode);
continue;
}
- set_bit(cmd->info.id, cxlm->enabled_cmds);
+ set_bit(cmd->info.id, cxlds->enabled_cmds);
}
}
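The CEL is consumed as an array of fixed-size entries; a sketch of the entry layout assumed by cxl_walk_cel() above (mirroring the driver's cxlmem.h definition):

/* Sketch: one Command Effects Log entry, as parsed above. */
struct cxl_cel_entry {
	__le16 opcode;	/* mailbox opcode the device claims to support */
	__le16 effect;	/* command effects bits reported by the device */
} __packed;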
-static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm)
+static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_dev_state *cxlds)
{
struct cxl_mbox_get_supported_logs *ret;
int rc;
- ret = kvmalloc(cxlm->payload_size, GFP_KERNEL);
+ ret = kvmalloc(cxlds->payload_size, GFP_KERNEL);
if (!ret)
return ERR_PTR(-ENOMEM);
- rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL,
- 0, ret, cxlm->payload_size);
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL, 0, ret,
+ cxlds->payload_size);
if (rc < 0) {
kvfree(ret);
return ERR_PTR(rc);
@@ -567,23 +565,23 @@ static const uuid_t log_uuid[] = {
};
/**
- * cxl_mem_enumerate_cmds() - Enumerate commands for a device.
- * @cxlm: The device.
+ * cxl_enumerate_cmds() - Enumerate commands for a device.
+ * @cxlds: The device data for the operation
*
* Returns 0 if enumerate completed successfully.
*
* CXL devices have optional support for certain commands. This function will
* determine the set of supported commands for the hardware and update the
- * enabled_cmds bitmap in the @cxlm.
+ * enabled_cmds bitmap in the @cxlds.
*/
-int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm)
+int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
{
struct cxl_mbox_get_supported_logs *gsl;
- struct device *dev = cxlm->dev;
+ struct device *dev = cxlds->dev;
struct cxl_mem_command *cmd;
int i, rc;
- gsl = cxl_get_gsl(cxlm);
+ gsl = cxl_get_gsl(cxlds);
if (IS_ERR(gsl))
return PTR_ERR(gsl);
@@ -604,19 +602,19 @@ int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm)
goto out;
}
- rc = cxl_xfer_log(cxlm, &uuid, size, log);
+ rc = cxl_xfer_log(cxlds, &uuid, size, log);
if (rc) {
kvfree(log);
goto out;
}
- cxl_walk_cel(cxlm, size, log);
+ cxl_walk_cel(cxlds, size, log);
kvfree(log);
/* In case CEL was bogus, enable some default commands. */
cxl_for_each_cmd(cmd)
if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
- set_bit(cmd->info.id, cxlm->enabled_cmds);
+ set_bit(cmd->info.id, cxlds->enabled_cmds);
/* Found the required CEL */
rc = 0;
@@ -626,11 +624,11 @@ out:
kvfree(gsl);
return rc;
}
-EXPORT_SYMBOL_GPL(cxl_mem_enumerate_cmds);
+EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
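Once enumeration has populated the bitmap, other parts of the driver can gate optional features on it; a minimal sketch (the consumer and command id are illustrative):

/* Sketch: check whether the device advertised Get LSA support. */
static bool example_supports_lsa(struct cxl_dev_state *cxlds)
{
	return test_bit(CXL_MEM_COMMAND_ID_GET_LSA, cxlds->enabled_cmds);
}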
/**
* cxl_mem_get_partition_info - Get partition info
- * @cxlm: cxl_mem instance to update partition info
+ * @cxlds: The device data for the operation
*
* Retrieve the current partition info for the device specified. The active
* values are the current capacity in bytes. If not 0, the 'next' values are
@@ -640,7 +638,7 @@ EXPORT_SYMBOL_GPL(cxl_mem_enumerate_cmds);
*
* See CXL @8.2.9.5.2.1 Get Partition Info
*/
-static int cxl_mem_get_partition_info(struct cxl_mem *cxlm)
+static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
{
struct cxl_mbox_get_partition_info {
__le64 active_volatile_cap;
@@ -650,124 +648,124 @@ static int cxl_mem_get_partition_info(struct cxl_mem *cxlm)
} __packed pi;
int rc;
- rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_PARTITION_INFO,
- NULL, 0, &pi, sizeof(pi));
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_PARTITION_INFO, NULL, 0,
+ &pi, sizeof(pi));
if (rc)
return rc;
- cxlm->active_volatile_bytes =
+ cxlds->active_volatile_bytes =
le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
- cxlm->active_persistent_bytes =
+ cxlds->active_persistent_bytes =
le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
- cxlm->next_volatile_bytes =
+ cxlds->next_volatile_bytes =
le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
- cxlm->next_persistent_bytes =
-	le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
+ cxlds->next_persistent_bytes =
+	le64_to_cpu(pi.next_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
return 0;
}
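For scale, the partition fields are reported in units of CXL_CAPACITY_MULTIPLIER (256 MiB in the driver's cxlmem.h), for example:

/* e.g. active_volatile_cap = 0x10  ->  0x10 * SZ_256M = 4 GiB */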
/**
- * cxl_mem_identify() - Send the IDENTIFY command to the device.
- * @cxlm: The device to identify.
+ * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
+ * @cxlds: The device data for the operation
*
* Return: 0 if identify was executed successfully.
*
* This will dispatch the identify command to the device and on success populate
* structures to be exported to sysfs.
*/
-int cxl_mem_identify(struct cxl_mem *cxlm)
+int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
{
/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
struct cxl_mbox_identify id;
int rc;
- rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
- sizeof(id));
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
+ sizeof(id));
if (rc < 0)
return rc;
- cxlm->total_bytes =
+ cxlds->total_bytes =
le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
- cxlm->volatile_only_bytes =
+ cxlds->volatile_only_bytes =
le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
- cxlm->persistent_only_bytes =
+ cxlds->persistent_only_bytes =
le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
- cxlm->partition_align_bytes =
+ cxlds->partition_align_bytes =
le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;
- dev_dbg(cxlm->dev,
+ dev_dbg(cxlds->dev,
"Identify Memory Device\n"
" total_bytes = %#llx\n"
" volatile_only_bytes = %#llx\n"
" persistent_only_bytes = %#llx\n"
" partition_align_bytes = %#llx\n",
- cxlm->total_bytes, cxlm->volatile_only_bytes,
- cxlm->persistent_only_bytes, cxlm->partition_align_bytes);
+ cxlds->total_bytes, cxlds->volatile_only_bytes,
+ cxlds->persistent_only_bytes, cxlds->partition_align_bytes);
- cxlm->lsa_size = le32_to_cpu(id.lsa_size);
- memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
+ cxlds->lsa_size = le32_to_cpu(id.lsa_size);
+ memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));
return 0;
}
-EXPORT_SYMBOL_GPL(cxl_mem_identify);
+EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
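The values cached by identify back the memdev sysfs attributes; a condensed sketch of such a show() handler after the rename (attribute registration omitted):

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;

	return sysfs_emit(buf, "%.16s\n", cxlds->firmware_version);
}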
-int cxl_mem_create_range_info(struct cxl_mem *cxlm)
+int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
{
int rc;
- if (cxlm->partition_align_bytes == 0) {
- cxlm->ram_range.start = 0;
- cxlm->ram_range.end = cxlm->volatile_only_bytes - 1;
- cxlm->pmem_range.start = cxlm->volatile_only_bytes;
- cxlm->pmem_range.end = cxlm->volatile_only_bytes +
- cxlm->persistent_only_bytes - 1;
+ if (cxlds->partition_align_bytes == 0) {
+ cxlds->ram_range.start = 0;
+ cxlds->ram_range.end = cxlds->volatile_only_bytes - 1;
+ cxlds->pmem_range.start = cxlds->volatile_only_bytes;
+ cxlds->pmem_range.end = cxlds->volatile_only_bytes +
+ cxlds->persistent_only_bytes - 1;
return 0;
}
- rc = cxl_mem_get_partition_info(cxlm);
+ rc = cxl_mem_get_partition_info(cxlds);
if (rc) {
- dev_err(cxlm->dev, "Failed to query partition information\n");
+ dev_err(cxlds->dev, "Failed to query partition information\n");
return rc;
}
- dev_dbg(cxlm->dev,
+ dev_dbg(cxlds->dev,
"Get Partition Info\n"
" active_volatile_bytes = %#llx\n"
" active_persistent_bytes = %#llx\n"
" next_volatile_bytes = %#llx\n"
" next_persistent_bytes = %#llx\n",
- cxlm->active_volatile_bytes, cxlm->active_persistent_bytes,
- cxlm->next_volatile_bytes, cxlm->next_persistent_bytes);
+ cxlds->active_volatile_bytes, cxlds->active_persistent_bytes,
+ cxlds->next_volatile_bytes, cxlds->next_persistent_bytes);
- cxlm->ram_range.start = 0;
- cxlm->ram_range.end = cxlm->active_volatile_bytes - 1;
+ cxlds->ram_range.start = 0;
+ cxlds->ram_range.end = cxlds->active_volatile_bytes - 1;
- cxlm->pmem_range.start = cxlm->active_volatile_bytes;
- cxlm->pmem_range.end =
- cxlm->active_volatile_bytes + cxlm->active_persistent_bytes - 1;
+ cxlds->pmem_range.start = cxlds->active_volatile_bytes;
+ cxlds->pmem_range.end =
+ cxlds->active_volatile_bytes + cxlds->active_persistent_bytes - 1;
return 0;
}
-EXPORT_SYMBOL_GPL(cxl_mem_create_range_info);
+EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
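A later consumer can size the partitions directly from the ranges set up here; a minimal sketch using the generic range helper:

/* Sketch: persistent capacity is simply the span of pmem_range. */
static u64 example_pmem_size(struct cxl_dev_state *cxlds)
{
	return range_len(&cxlds->pmem_range);	/* end - start + 1 */
}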
-struct cxl_mem *cxl_mem_create(struct device *dev)
+struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
{
- struct cxl_mem *cxlm;
+ struct cxl_dev_state *cxlds;
- cxlm = devm_kzalloc(dev, sizeof(*cxlm), GFP_KERNEL);
- if (!cxlm) {
+ cxlds = devm_kzalloc(dev, sizeof(*cxlds), GFP_KERNEL);
+ if (!cxlds) {
dev_err(dev, "No memory available\n");
return ERR_PTR(-ENOMEM);
}
- mutex_init(&cxlm->mbox_mutex);
- cxlm->dev = dev;
+ mutex_init(&cxlds->mbox_mutex);
+ cxlds->dev = dev;
- return cxlm;
+ return cxlds;
}
-EXPORT_SYMBOL_GPL(cxl_mem_create);
+EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL);
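Taken together, a transport driver (PCI or a test harness) is expected to call these exported helpers in roughly this order at probe time; a condensed sketch with register setup and cleanup elided:

/* Sketch: probe-time ordering of the helpers exported from this file. */
static int example_probe(struct device *dev)
{
	struct cxl_dev_state *cxlds;
	int rc;

	cxlds = cxl_dev_state_create(dev);
	if (IS_ERR(cxlds))
		return PTR_ERR(cxlds);

	/* transport fills in cxlds->mbox_send and cxlds->payload_size here */

	rc = cxl_enumerate_cmds(cxlds);
	if (rc)
		return rc;

	rc = cxl_dev_state_identify(cxlds);
	if (rc)
		return rc;

	return cxl_mem_create_range_info(cxlds);
}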
static struct dentry *cxl_debugfs;