author	Mike Christie <michael.christie@oracle.com>	2023-04-07 23:05:44 +0300
committer	Martin K. Petersen <martin.petersen@oracle.com>	2023-04-12 04:55:35 +0300
commit	5fd96a4e15de8442915a912233d800c56f49001d (patch)
tree	75478f370bcb2d4512a4b49f96877b5b3d834502 /drivers
parent	f0614790b77300d69a1f37265f98b68c4835811b (diff)
nvme: Add pr_ops read_keys support
This patch adds support for the pr_ops read_keys callout by calling the NVMe Reservation Report helper and then parsing the returned data to get the controller's registered keys. Because the callout is only used in the kernel, where callers like LIO do not know about controller/host IDs, it just returns the registered keys, which is what the SCSI PERSISTENT RESERVE IN (READ KEYS) command requires.

Signed-off-by: Mike Christie <michael.christie@oracle.com>
Link: https://lore.kernel.org/r/20230407200551.12660-12-michael.christie@oracle.com
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
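For context, a minimal sketch of how an in-kernel user such as LIO might exercise the new callout through the generic pr_ops interface. The function name example_read_keys, the 16-slot buffer size, and the key printing are illustrative assumptions, not part of this patch; the struct pr_keys layout and pr_read_keys callout come from the block layer pr_ops additions earlier in this series.

/*
 * Hypothetical in-kernel caller sketch (not part of this patch).
 * Assumes <linux/blkdev.h>, <linux/pr.h> and <linux/slab.h>, and the
 * struct pr_keys / pr_read_keys interface added earlier in this series.
 */
static int example_read_keys(struct block_device *bdev)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_keys *keys_info;
	u32 i, max_keys = 16;	/* arbitrary slot count for illustration */
	int ret;

	if (!ops || !ops->pr_read_keys)
		return -EOPNOTSUPP;

	/* num_keys tells the driver how many key slots the buffer holds. */
	keys_info = kzalloc(struct_size(keys_info, keys, max_keys), GFP_KERNEL);
	if (!keys_info)
		return -ENOMEM;
	keys_info->num_keys = max_keys;

	ret = ops->pr_read_keys(bdev, keys_info);
	if (!ret) {
		/* On return, num_keys is the count the device reported. */
		for (i = 0; i < min(max_keys, keys_info->num_keys); i++)
			pr_info("registered key %u: 0x%llx\n", i,
				(unsigned long long)keys_info->keys[i]);
	}

	kfree(keys_info);
	return ret;
}

The driver may report more registrations than the buffer can hold, which is why the caller caps the loop at the smaller of the two counts, mirroring the min() in the NVMe implementation below.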
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/nvme/host/pr.c	| 69 +
1 file changed, 69 insertions(+), 0 deletions(-)
diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
index cd93d2e5b340..0ee656404437 100644
--- a/drivers/nvme/host/pr.c
+++ b/drivers/nvme/host/pr.c
@@ -154,10 +154,79 @@ static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
 }
 
+static int nvme_pr_resv_report(struct block_device *bdev, void *data,
+		u32 data_len, bool *eds)
+{
+	struct nvme_command c = { };
+	int ret;
+
+	c.common.opcode = nvme_cmd_resv_report;
+	c.common.cdw10 = cpu_to_le32(nvme_bytes_to_numd(data_len));
+	c.common.cdw11 = cpu_to_le32(NVME_EXTENDED_DATA_STRUCT);
+	*eds = true;
+
+retry:
+	ret = nvme_send_pr_command(bdev, &c, data, data_len);
+	if (ret == NVME_SC_HOST_ID_INCONSIST &&
+	    c.common.cdw11 == cpu_to_le32(NVME_EXTENDED_DATA_STRUCT)) {
+		c.common.cdw11 = 0;
+		*eds = false;
+		goto retry;
+	}
+
+	if (ret < 0)
+		return ret;
+
+	return nvme_sc_to_pr_err(ret);
+}
+
+static int nvme_pr_read_keys(struct block_device *bdev,
+		struct pr_keys *keys_info)
+{
+	u32 rse_len, num_keys = keys_info->num_keys;
+	struct nvme_reservation_status_ext *rse;
+	int ret, i;
+	bool eds;
+
+	/*
+	 * Assume we are using 128-bit host IDs and allocate a buffer large
+	 * enough to get enough keys to fill the return keys buffer.
+	 */
+	rse_len = struct_size(rse, regctl_eds, num_keys);
+	rse = kzalloc(rse_len, GFP_KERNEL);
+	if (!rse)
+		return -ENOMEM;
+
+	ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
+	if (ret)
+		goto free_rse;
+
+	keys_info->generation = le32_to_cpu(rse->gen);
+	keys_info->num_keys = get_unaligned_le16(&rse->regctl);
+
+	num_keys = min(num_keys, keys_info->num_keys);
+	for (i = 0; i < num_keys; i++) {
+		if (eds) {
+			keys_info->keys[i] =
+					le64_to_cpu(rse->regctl_eds[i].rkey);
+		} else {
+			struct nvme_reservation_status *rs;
+
+			rs = (struct nvme_reservation_status *)rse;
+			keys_info->keys[i] = le64_to_cpu(rs->regctl_ds[i].rkey);
+		}
+	}
+
+free_rse:
+	kfree(rse);
+	return ret;
+}
+
 const struct pr_ops nvme_pr_ops = {
 	.pr_register	= nvme_pr_register,
 	.pr_reserve	= nvme_pr_reserve,
 	.pr_release	= nvme_pr_release,
 	.pr_preempt	= nvme_pr_preempt,
 	.pr_clear	= nvme_pr_clear,
+	.pr_read_keys	= nvme_pr_read_keys,
 };