author     Linus Torvalds <torvalds@linux-foundation.org>  2021-06-30 22:21:16 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-06-30 22:21:16 +0300
commit     440462198d9c45e48f2d8d9b18c5702d92282f46 (patch)
tree       9aab5db02f35d0cf9034108116b6a483147791ad /drivers/nvme
parent     df668a5fe461bb9d7e899c538acc7197746038f4 (diff)
parent     5ed9b357024dc43f75099f597187df05bcd5173c (diff)
Merge tag 'for-5.14/drivers-2021-06-29' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
 "Pretty calm round, mostly just NVMe and a bit of MD:

  - NVMe updates (via Christoph)
      - improve the APST configuration algorithm (Alexey Bogoslavsky)
      - look for StorageD3Enable on companion ACPI device (Mario Limonciello)
      - allow selecting the network interface for TCP connections (Martin Belanger)
      - misc cleanups (Amit Engel, Chaitanya Kulkarni, Colin Ian King, Christoph)
      - move the ACPI StorageD3 code to drivers/acpi/ and add quirks for certain AMD CPUs (Mario Limonciello)
      - zoned device support for nvmet (Chaitanya Kulkarni)
      - fix the rules for changing the serial number in nvmet (Noam Gottlieb)
      - various small fixes and cleanups (Dan Carpenter, JK Kim, Chaitanya Kulkarni, Hannes Reinecke, Wesley Sheng, Geert Uytterhoeven, Daniel Wagner)

  - MD updates (Via Song)
      - iostats rewrite (Guoqing Jiang)
      - raid5 lock contention optimization (Gal Ofri)

  - Fall through warning fix (Gustavo)

  - Misc fixes (Gustavo, Jiapeng)"

* tag 'for-5.14/drivers-2021-06-29' of git://git.kernel.dk/linux-block: (78 commits)
  nvmet: use NVMET_MAX_NAMESPACES to set nn value
  loop: Fix missing discard support when using LOOP_CONFIGURE
  nvme.h: add missing nvme_lba_range_type endianness annotations
  nvme: remove zeroout memset call for struct
  nvme-pci: remove zeroout memset call for struct
  nvmet: remove zeroout memset call for struct
  nvmet: add ZBD over ZNS backend support
  nvmet: add Command Set Identifier support
  nvmet: add nvmet_req_bio put helper for backends
  nvmet: add req cns error complete helper
  block: export blk_next_bio()
  nvmet: remove local variable
  nvmet: use nvme status value directly
  nvmet: use u32 type for the local variable nsid
  nvmet: use u32 for nvmet_subsys max_nsid
  nvmet: use req->cmd directly in file-ns fast path
  nvmet: use req->cmd directly in bdev-ns fast path
  nvmet: make ver stable once connection established
  nvmet: allow mn change if subsys not discovered
  nvmet: make sn stable once connection was established
  ...
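Among the items above, the new network-interface selection for TCP surfaces as the host_iface= token parsed in drivers/nvme/host/fabrics.c further down in this diff. The following is a minimal userspace sketch (not part of this commit) of passing that option through /dev/nvme-fabrics the way nvme-cli does; the target address, subsystem NQN, and interface name are placeholders.

/*
 * Hedged sketch: connect an NVMe/TCP controller and pin it to a specific
 * local interface via the new host_iface= fabrics option.  The address,
 * NQN and interface name are placeholders.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *opts =
		"transport=tcp,traddr=192.168.1.10,trsvcid=4420,"
		"nqn=nqn.2014-08.org.example:subsys1,host_iface=eth0";
	char resp[128] = "";
	int fd = open("/dev/nvme-fabrics", O_RDWR);

	if (fd < 0) {
		perror("open /dev/nvme-fabrics");
		return 1;
	}
	/* The kernel parses this option string in nvmf_parse_options() */
	if (write(fd, opts, strlen(opts)) < 0) {
		perror("connect");
		close(fd);
		return 1;
	}
	/* On success the kernel reports the new controller, e.g. "instance=0,cntlid=1" */
	if (read(fd, resp, sizeof(resp) - 1) > 0)
		printf("connected: %s\n", resp);
	close(fd);
	return 0;
}

If the named interface does not exist, nvme_tcp_create_ctrl() in the tcp.c hunk below rejects the connect with -ENODEV; once queues are allocated, nvme_tcp_alloc_queue() binds each socket to the interface with SO_BINDTODEVICE.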
Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/host/Kconfig          |    2
-rw-r--r--  drivers/nvme/host/core.c           |  192
-rw-r--r--  drivers/nvme/host/fabrics.c        |   58
-rw-r--r--  drivers/nvme/host/fabrics.h        |    6
-rw-r--r--  drivers/nvme/host/fc.c             |    2
-rw-r--r--  drivers/nvme/host/ioctl.c          |   61
-rw-r--r--  drivers/nvme/host/multipath.c      |   33
-rw-r--r--  drivers/nvme/host/nvme.h           |   17
-rw-r--r--  drivers/nvme/host/pci.c            |   82
-rw-r--r--  drivers/nvme/host/rdma.c           |    2
-rw-r--r--  drivers/nvme/host/tcp.c            |   31
-rw-r--r--  drivers/nvme/host/zns.c            |   27
-rw-r--r--  drivers/nvme/target/Makefile       |    1
-rw-r--r--  drivers/nvme/target/admin-cmd.c    |  155
-rw-r--r--  drivers/nvme/target/configfs.c     |  102
-rw-r--r--  drivers/nvme/target/core.c         |  100
-rw-r--r--  drivers/nvme/target/discovery.c    |    8
-rw-r--r--  drivers/nvme/target/fc.c           |   10
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c  |   36
-rw-r--r--  drivers/nvme/target/io-cmd-file.c  |    4
-rw-r--r--  drivers/nvme/target/nvmet.h        |   41
-rw-r--r--  drivers/nvme/target/passthru.c     |    3
-rw-r--r--  drivers/nvme/target/rdma.c         |    3
-rw-r--r--  drivers/nvme/target/zns.c          |  615
24 files changed, 1214 insertions, 377 deletions
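The APST rework called out in the pull message adds four writable module parameters in the core.c hunk below (apst_primary_timeout_ms, apst_secondary_timeout_ms, apst_primary_latency_tol_us, apst_secondary_latency_tol_us). A small sketch (not part of this commit) that dumps their current values, assuming the usual module_param sysfs layout under /sys/module/nvme_core/parameters/:

#include <stdio.h>

static const char *params[] = {
	"apst_primary_timeout_ms",
	"apst_secondary_timeout_ms",
	"apst_primary_latency_tol_us",
	"apst_secondary_latency_tol_us",
};

int main(void)
{
	char path[128], val[64];
	size_t i;

	for (i = 0; i < sizeof(params) / sizeof(params[0]); i++) {
		/* module_param(..., 0644) exposes each knob as a sysfs file */
		snprintf(path, sizeof(path),
			 "/sys/module/nvme_core/parameters/%s", params[i]);
		FILE *f = fopen(path, "r");

		if (!f)
			continue;	/* nvme-core not loaded, or an older kernel */
		if (fgets(val, sizeof(val), f))
			printf("%s = %s", params[i], val);
		fclose(f);
	}
	return 0;
}

Per nvme_configure_apst() in the same hunk, clearing apst_primary_timeout_ms or apst_primary_latency_tol_us falls back to the old 2%-of-time heuristic, and setting apst_secondary_timeout_ms to 0 drops the secondary transition target.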
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 494675aeaaad..c3f3d77f1aac 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -21,7 +21,7 @@ config NVME_MULTIPATH
help
This option enables support for multipath access to NVMe
subsystems. If this option is enabled only a single
- /dev/nvmeXnY device will show up for each NVMe namespaces,
+ /dev/nvmeXnY device will show up for each NVMe namespace,
even if it is accessible through multiple controllers.
config NVME_HWMON
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 01889a8abb6b..80c656dcbbac 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -57,6 +57,26 @@ static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
+static unsigned long apst_primary_timeout_ms = 100;
+module_param(apst_primary_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(apst_primary_timeout_ms,
+ "primary APST timeout in ms");
+
+static unsigned long apst_secondary_timeout_ms = 2000;
+module_param(apst_secondary_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(apst_secondary_timeout_ms,
+ "secondary APST timeout in ms");
+
+static unsigned long apst_primary_latency_tol_us = 15000;
+module_param(apst_primary_latency_tol_us, ulong, 0644);
+MODULE_PARM_DESC(apst_primary_latency_tol_us,
+ "primary APST latency tolerance in us");
+
+static unsigned long apst_secondary_latency_tol_us = 100000;
+module_param(apst_secondary_latency_tol_us, ulong, 0644);
+MODULE_PARM_DESC(apst_secondary_latency_tol_us,
+ "secondary APST latency tolerance in us");
+
static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
@@ -701,9 +721,7 @@ EXPORT_SYMBOL_GPL(__nvme_check_ready);
static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
- struct nvme_command c;
-
- memset(&c, 0, sizeof(c));
+ struct nvme_command c = { };
c.directive.opcode = nvme_admin_directive_send;
c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
@@ -728,9 +746,8 @@ static int nvme_enable_streams(struct nvme_ctrl *ctrl)
static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
struct streams_directive_params *s, u32 nsid)
{
- struct nvme_command c;
+ struct nvme_command c = { };
- memset(&c, 0, sizeof(c));
memset(s, 0, sizeof(*s));
c.directive.opcode = nvme_admin_directive_recv;
@@ -1440,10 +1457,9 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{
union nvme_result res = { 0 };
- struct nvme_command c;
+ struct nvme_command c = { };
int ret;
- memset(&c, 0, sizeof(c));
c.features.opcode = op;
c.features.fid = cpu_to_le32(fid);
c.features.dword11 = cpu_to_le32(dword11);
@@ -1522,36 +1538,6 @@ static void nvme_enable_aen(struct nvme_ctrl *ctrl)
queue_work(nvme_wq, &ctrl->async_event_work);
}
-/*
- * Issue ioctl requests on the first available path. Note that unlike normal
- * block layer requests we will not retry failed request on another controller.
- */
-struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
- struct nvme_ns_head **head, int *srcu_idx)
-{
-#ifdef CONFIG_NVME_MULTIPATH
- if (disk->fops == &nvme_ns_head_ops) {
- struct nvme_ns *ns;
-
- *head = disk->private_data;
- *srcu_idx = srcu_read_lock(&(*head)->srcu);
- ns = nvme_find_path(*head);
- if (!ns)
- srcu_read_unlock(&(*head)->srcu, *srcu_idx);
- return ns;
- }
-#endif
- *head = NULL;
- *srcu_idx = -1;
- return disk->private_data;
-}
-
-void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
-{
- if (head)
- srcu_read_unlock(&head->srcu, idx);
-}
-
static int nvme_ns_open(struct nvme_ns *ns)
{
@@ -1601,9 +1587,8 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
u32 max_integrity_segments)
{
- struct blk_integrity integrity;
+ struct blk_integrity integrity = { };
- memset(&integrity, 0, sizeof(integrity));
switch (pi_type) {
case NVME_NS_DPS_PI_TYPE3:
integrity.profile = &t10_pi_type3_crc;
@@ -1948,30 +1933,45 @@ static char nvme_pr_type(enum pr_type type)
}
};
+static int nvme_send_ns_head_pr_command(struct block_device *bdev,
+ struct nvme_command *c, u8 data[16])
+{
+ struct nvme_ns_head *head = bdev->bd_disk->private_data;
+ int srcu_idx = srcu_read_lock(&head->srcu);
+ struct nvme_ns *ns = nvme_find_path(head);
+ int ret = -EWOULDBLOCK;
+
+ if (ns) {
+ c->common.nsid = cpu_to_le32(ns->head->ns_id);
+ ret = nvme_submit_sync_cmd(ns->queue, c, data, 16);
+ }
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+
+static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
+ u8 data[16])
+{
+ c->common.nsid = cpu_to_le32(ns->head->ns_id);
+ return nvme_submit_sync_cmd(ns->queue, c, data, 16);
+}
+
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
u64 key, u64 sa_key, u8 op)
{
- struct nvme_ns_head *head = NULL;
- struct nvme_ns *ns;
- struct nvme_command c;
- int srcu_idx, ret;
+ struct nvme_command c = { };
u8 data[16] = { 0, };
- ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
- if (unlikely(!ns))
- return -EWOULDBLOCK;
-
put_unaligned_le64(key, &data[0]);
put_unaligned_le64(sa_key, &data[8]);
- memset(&c, 0, sizeof(c));
c.common.opcode = op;
- c.common.nsid = cpu_to_le32(ns->head->ns_id);
c.common.cdw10 = cpu_to_le32(cdw10);
- ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
- nvme_put_ns_from_disk(head, srcu_idx);
- return ret;
+ if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
+ bdev->bd_disk->fops == &nvme_ns_head_ops)
+ return nvme_send_ns_head_pr_command(bdev, &c, data);
+ return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, data);
}
static int nvme_pr_register(struct block_device *bdev, u64 old,
@@ -2036,9 +2036,8 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
bool send)
{
struct nvme_ctrl *ctrl = data;
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
- memset(&cmd, 0, sizeof(cmd));
if (send)
cmd.common.opcode = nvme_admin_security_send;
else
@@ -2053,6 +2052,17 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */
+#ifdef CONFIG_BLK_DEV_ZONED
+static int nvme_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+ return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb,
+ data);
+}
+#else
+#define nvme_report_zones NULL
+#endif /* CONFIG_BLK_DEV_ZONED */
+
static const struct block_device_operations nvme_bdev_ops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
@@ -2218,13 +2228,53 @@ static int nvme_configure_acre(struct nvme_ctrl *ctrl)
}
/*
+ * The function checks whether the given total (exlat + enlat) latency of
+ * a power state allows the latter to be used as an APST transition target.
+ * It does so by comparing the latency to the primary and secondary latency
+ * tolerances defined by module params. If there's a match, the corresponding
+ * timeout value is returned and the matching tolerance index (1 or 2) is
+ * reported.
+ */
+static bool nvme_apst_get_transition_time(u64 total_latency,
+ u64 *transition_time, unsigned *last_index)
+{
+ if (total_latency <= apst_primary_latency_tol_us) {
+ if (*last_index == 1)
+ return false;
+ *last_index = 1;
+ *transition_time = apst_primary_timeout_ms;
+ return true;
+ }
+ if (apst_secondary_timeout_ms &&
+ total_latency <= apst_secondary_latency_tol_us) {
+ if (*last_index <= 2)
+ return false;
+ *last_index = 2;
+ *transition_time = apst_secondary_timeout_ms;
+ return true;
+ }
+ return false;
+}
+
+/*
* APST (Autonomous Power State Transition) lets us program a table of power
* state transitions that the controller will perform automatically.
- * We configure it with a simple heuristic: we are willing to spend at most 2%
- * of the time transitioning between power states. Therefore, when running in
- * any given state, we will enter the next lower-power non-operational state
- * after waiting 50 * (enlat + exlat) microseconds, as long as that state's exit
- * latency is under the requested maximum latency.
+ *
+ * Depending on module params, one of the two supported techniques will be used:
+ *
+ * - If the parameters provide explicit timeouts and tolerances, they will be
+ * used to build a table with up to 2 non-operational states to transition to.
+ * The default parameter values were selected based on the values used by
+ * Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic
+ * regeneration of the APST table in the event of switching between external
+ * and battery power, the timeouts and tolerances reflect a compromise
+ * between values used by Microsoft for AC and battery scenarios.
+ * - If not, we'll configure the table with a simple heuristic: we are willing
+ * to spend at most 2% of the time transitioning between power states.
+ * Therefore, when running in any given state, we will enter the next
+ * lower-power non-operational state after waiting 50 * (enlat + exlat)
+ * microseconds, as long as that state's exit latency is under the requested
+ * maximum latency.
*
* We will not autonomously enter any non-operational state for which the total
* latency exceeds ps_max_latency_us.
@@ -2240,6 +2290,7 @@ static int nvme_configure_apst(struct nvme_ctrl *ctrl)
int max_ps = -1;
int state;
int ret;
+ unsigned last_lt_index = UINT_MAX;
/*
* If APST isn't supported or if we haven't been initialized yet,
@@ -2298,13 +2349,19 @@ static int nvme_configure_apst(struct nvme_ctrl *ctrl)
le32_to_cpu(ctrl->psd[state].entry_lat);
/*
- * This state is good. Use it as the APST idle target for
- * higher power states.
+ * This state is good. It can be used as the APST idle target
+ * for higher power states.
*/
- transition_ms = total_latency_us + 19;
- do_div(transition_ms, 20);
- if (transition_ms > (1 << 24) - 1)
- transition_ms = (1 << 24) - 1;
+ if (apst_primary_timeout_ms && apst_primary_latency_tol_us) {
+ if (!nvme_apst_get_transition_time(total_latency_us,
+ &transition_ms, &last_lt_index))
+ continue;
+ } else {
+ transition_ms = total_latency_us + 19;
+ do_div(transition_ms, 20);
+ if (transition_ms > (1 << 24) - 1)
+ transition_ms = (1 << 24) - 1;
+ }
target = cpu_to_le64((state << 3) | (transition_ms << 8));
if (max_ps == -1)
@@ -4068,6 +4125,11 @@ static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
opts->host_traddr ?: "none");
+ if (ret)
+ return ret;
+
+ ret = add_uevent_var(env, "NVME_HOST_IFACE=%s",
+ opts->host_iface ?: "none");
}
return ret;
}
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 34a84d2086c7..1e6a7cc056ca 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -112,6 +112,9 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
len += scnprintf(buf + len, size - len, "%shost_traddr=%s",
(len) ? "," : "", ctrl->opts->host_traddr);
+ if (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)
+ len += scnprintf(buf + len, size - len, "%shost_iface=%s",
+ (len) ? "," : "", ctrl->opts->host_iface);
len += scnprintf(buf + len, size - len, "\n");
return len;
@@ -187,11 +190,10 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read32);
*/
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
union nvme_result res;
int ret;
- memset(&cmd, 0, sizeof(cmd));
cmd.prop_get.opcode = nvme_fabrics_command;
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.attrib = 1;
@@ -233,10 +235,9 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read64);
*/
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
int ret;
- memset(&cmd, 0, sizeof(cmd));
cmd.prop_set.opcode = nvme_fabrics_command;
cmd.prop_set.fctype = nvme_fabrics_type_property_set;
cmd.prop_set.attrib = 0;
@@ -254,28 +255,23 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
EXPORT_SYMBOL_GPL(nvmf_reg_write32);
/**
- * nvmf_log_connect_error() - Error-parsing-diagnostic print
- * out function for connect() errors.
- *
- * @ctrl: the specific /dev/nvmeX device that had the error.
- *
- * @errval: Error code to be decoded in a more human-friendly
- * printout.
- *
- * @offset: For use with the NVMe error code NVME_SC_CONNECT_INVALID_PARAM.
- *
- * @cmd: This is the SQE portion of a submission capsule.
- *
- * @data: This is the "Data" portion of a submission capsule.
+ * nvmf_log_connect_error() - Error-parsing-diagnostic print out function for
+ * connect() errors.
+ * @ctrl: The specific /dev/nvmeX device that had the error.
+ * @errval: Error code to be decoded in a more human-friendly
+ * printout.
+ * @offset: For use with the NVMe error code
+ * NVME_SC_CONNECT_INVALID_PARAM.
+ * @cmd: This is the SQE portion of a submission capsule.
+ * @data: This is the "Data" portion of a submission capsule.
*/
static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
int errval, int offset, struct nvme_command *cmd,
struct nvmf_connect_data *data)
{
- int err_sctype = errval & (~NVME_SC_DNR);
+ int err_sctype = errval & ~NVME_SC_DNR;
switch (err_sctype) {
-
case (NVME_SC_CONNECT_INVALID_PARAM):
if (offset >> 16) {
char *inv_data = "Connect Invalid Data Parameter";
@@ -318,35 +314,30 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
}
}
break;
-
case NVME_SC_CONNECT_INVALID_HOST:
dev_err(ctrl->device,
"Connect for subsystem %s is not allowed, hostnqn: %s\n",
data->subsysnqn, data->hostnqn);
break;
-
case NVME_SC_CONNECT_CTRL_BUSY:
dev_err(ctrl->device,
"Connect command failed: controller is busy or not available\n");
break;
-
case NVME_SC_CONNECT_FORMAT:
dev_err(ctrl->device,
"Connect incompatible format: %d",
cmd->connect.recfmt);
break;
-
case NVME_SC_HOST_PATH_ERROR:
dev_err(ctrl->device,
"Connect command failed: host path error\n");
break;
-
default:
dev_err(ctrl->device,
"Connect command failed, error wo/DNR bit: %d\n",
err_sctype);
break;
- } /* switch (err_sctype) */
+ }
}
/**
@@ -371,12 +362,11 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
*/
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
{
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
union nvme_result res;
struct nvmf_connect_data *data;
int ret;
- memset(&cmd, 0, sizeof(cmd));
cmd.connect.opcode = nvme_fabrics_command;
cmd.connect.fctype = nvme_fabrics_type_connect;
cmd.connect.qid = 0;
@@ -439,12 +429,11 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
*/
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
{
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
struct nvmf_connect_data *data;
union nvme_result res;
int ret;
- memset(&cmd, 0, sizeof(cmd));
cmd.connect.opcode = nvme_fabrics_command;
cmd.connect.fctype = nvme_fabrics_type_connect;
cmd.connect.qid = cpu_to_le16(qid);
@@ -550,6 +539,7 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_KATO, "keep_alive_tmo=%d" },
{ NVMF_OPT_HOSTNQN, "hostnqn=%s" },
{ NVMF_OPT_HOST_TRADDR, "host_traddr=%s" },
+ { NVMF_OPT_HOST_IFACE, "host_iface=%s" },
{ NVMF_OPT_HOST_ID, "hostid=%s" },
{ NVMF_OPT_DUP_CONNECT, "duplicate_connect" },
{ NVMF_OPT_DISABLE_SQFLOW, "disable_sqflow" },
@@ -759,6 +749,15 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
kfree(opts->host_traddr);
opts->host_traddr = p;
break;
+ case NVMF_OPT_HOST_IFACE:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ kfree(opts->host_iface);
+ opts->host_iface = p;
+ break;
case NVMF_OPT_HOST_ID:
p = match_strdup(args);
if (!p) {
@@ -943,6 +942,7 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts)
kfree(opts->trsvcid);
kfree(opts->subsysnqn);
kfree(opts->host_traddr);
+ kfree(opts->host_iface);
kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index d7f7974dc208..c31dad69a773 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -66,6 +66,7 @@ enum {
NVMF_OPT_NR_POLL_QUEUES = 1 << 18,
NVMF_OPT_TOS = 1 << 19,
NVMF_OPT_FAIL_FAST_TMO = 1 << 20,
+ NVMF_OPT_HOST_IFACE = 1 << 21,
};
/**
@@ -83,7 +84,9 @@ enum {
* @trsvcid: The transport-specific TRSVCID field for a port on the
* subsystem which is adding a controller.
* @host_traddr: A transport-specific field identifying the NVME host port
- * to use for the connection to the controller.
+ * to use for the connection to the controller.
+ * @host_iface: A transport-specific field identifying the NVME host
+ * interface to use for the connection to the controller.
* @queue_size: Number of IO queue elements.
* @nr_io_queues: Number of controller IO queues that will be established.
* @reconnect_delay: Time between two consecutive reconnect attempts.
@@ -108,6 +111,7 @@ struct nvmf_ctrl_options {
char *traddr;
char *trsvcid;
char *host_traddr;
+ char *host_iface;
size_t queue_size;
unsigned int nr_io_queues;
unsigned int reconnect_delay;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index f183f9fa03d0..7600863f7752 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3112,7 +3112,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
}
/* FC-NVME supports normal SGL Data Block Descriptors */
- if (!(ctrl->ctrl.sgls & ((1 << 0) | (1 << 1)))) {
+ if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) {
dev_err(ctrl->ctrl.device,
"Mandatory sgls are not supported!\n");
ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 9557ead02de1..d93928d1e5bd 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -177,6 +177,20 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}
+static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
+ struct nvme_ns *ns, __u32 nsid)
+{
+ if (ns && nsid != ns->head->ns_id) {
+ dev_err(ctrl->device,
+ "%s: nsid (%u) in cmd does not match nsid (%u)"
+ "of namespace\n",
+ current->comm, nsid, ns->head->ns_id);
+ return false;
+ }
+
+ return true;
+}
+
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_passthru_cmd __user *ucmd)
{
@@ -192,12 +206,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return -EFAULT;
if (cmd.flags)
return -EINVAL;
- if (ns && cmd.nsid != ns->head->ns_id) {
- dev_err(ctrl->device,
- "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
- current->comm, cmd.nsid, ns->head->ns_id);
+ if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
return -EINVAL;
- }
memset(&c, 0, sizeof(c));
c.common.opcode = cmd.opcode;
@@ -242,12 +252,8 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return -EFAULT;
if (cmd.flags)
return -EINVAL;
- if (ns && cmd.nsid != ns->head->ns_id) {
- dev_err(ctrl->device,
- "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
- current->comm, cmd.nsid, ns->head->ns_id);
+ if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
return -EINVAL;
- }
memset(&c, 0, sizeof(c));
c.common.opcode = cmd.opcode;
@@ -372,12 +378,13 @@ long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
void __user *argp, struct nvme_ns_head *head, int srcu_idx)
+ __releases(&head->srcu)
{
struct nvme_ctrl *ctrl = ns->ctrl;
int ret;
nvme_get_ctrl(ns->ctrl);
- nvme_put_ns_from_disk(head, srcu_idx);
+ srcu_read_unlock(&head->srcu, srcu_idx);
ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp);
nvme_put_ctrl(ctrl);
@@ -387,14 +394,15 @@ static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct nvme_ns_head *head = NULL;
+ struct nvme_ns_head *head = bdev->bd_disk->private_data;
void __user *argp = (void __user *)arg;
struct nvme_ns *ns;
- int srcu_idx, ret;
+ int srcu_idx, ret = -EWOULDBLOCK;
- ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
- if (unlikely(!ns))
- return -EWOULDBLOCK;
+ srcu_idx = srcu_read_lock(&head->srcu);
+ ns = nvme_find_path(head);
+ if (!ns)
+ goto out_unlock;
/*
* Handle ioctls that apply to the controller instead of the namespace
@@ -402,12 +410,11 @@ int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
* deadlock when deleting namespaces using the passthrough interface.
*/
if (is_ctrl_ioctl(cmd))
- ret = nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
- else {
- ret = nvme_ns_ioctl(ns, cmd, argp);
- nvme_put_ns_from_disk(head, srcu_idx);
- }
+ return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
+ ret = nvme_ns_ioctl(ns, cmd, argp);
+out_unlock:
+ srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
}
@@ -419,21 +426,19 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
container_of(cdev, struct nvme_ns_head, cdev);
void __user *argp = (void __user *)arg;
struct nvme_ns *ns;
- int srcu_idx, ret;
+ int srcu_idx, ret = -EWOULDBLOCK;
srcu_idx = srcu_read_lock(&head->srcu);
ns = nvme_find_path(head);
- if (!ns) {
- srcu_read_unlock(&head->srcu, srcu_idx);
- return -EWOULDBLOCK;
- }
+ if (!ns)
+ goto out_unlock;
if (is_ctrl_ioctl(cmd))
return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
ret = nvme_ns_ioctl(ns, cmd, argp);
- nvme_put_ns_from_disk(head, srcu_idx);
-
+out_unlock:
+ srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
}
#endif /* CONFIG_NVME_MULTIPATH */
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index b5fbdb416022..0ea5298469c3 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -349,6 +349,25 @@ static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
nvme_put_ns_head(disk->private_data);
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+ struct nvme_ns_head *head = disk->private_data;
+ struct nvme_ns *ns;
+ int srcu_idx, ret = -EWOULDBLOCK;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ ns = nvme_find_path(head);
+ if (ns)
+ ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+#else
+#define nvme_ns_head_report_zones NULL
+#endif /* CONFIG_BLK_DEV_ZONED */
+
const struct block_device_operations nvme_ns_head_ops = {
.owner = THIS_MODULE,
.submit_bio = nvme_ns_head_submit_bio,
@@ -356,7 +375,7 @@ const struct block_device_operations nvme_ns_head_ops = {
.release = nvme_ns_head_release,
.ioctl = nvme_ns_head_ioctl,
.getgeo = nvme_getgeo,
- .report_zones = nvme_report_zones,
+ .report_zones = nvme_ns_head_report_zones,
.pr_ops = &nvme_pr_ops,
};
@@ -416,11 +435,6 @@ static void nvme_requeue_work(struct work_struct *work)
next = bio->bi_next;
bio->bi_next = NULL;
- /*
- * Reset disk to the mpath node and resubmit to select a new
- * path.
- */
- bio_set_dev(bio, head->disk->part0);
submit_bio_noacct(bio);
}
}
@@ -779,6 +793,13 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
!(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
return 0;
+ if (!ctrl->max_namespaces ||
+ ctrl->max_namespaces > le32_to_cpu(id->nn)) {
+ dev_err(ctrl->device,
+ "Invalid MNAN value %u\n", ctrl->max_namespaces);
+ return -EINVAL;
+ }
+
ctrl->anacap = id->anacap;
ctrl->anatt = id->anatt;
ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 0015860ec12b..75420ceacc10 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -674,9 +674,6 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
void *log, size_t size, u64 offset);
-struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
- struct nvme_ns_head **head, int *srcu_idx);
-void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
@@ -697,6 +694,7 @@ extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
+struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
@@ -718,7 +716,6 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
-struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
@@ -810,17 +807,14 @@ static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
#endif /* CONFIG_NVME_MULTIPATH */
int nvme_revalidate_zones(struct nvme_ns *ns);
+int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
#ifdef CONFIG_BLK_DEV_ZONED
int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
-int nvme_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data);
-
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmnd,
enum nvme_zone_mgmt_action action);
#else
-#define nvme_report_zones NULL
-
static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
struct request *req, struct nvme_command *cmnd,
enum nvme_zone_mgmt_action action)
@@ -875,6 +869,11 @@ static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
}
#endif
+static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
+{
+ return ctrl->sgls & ((1 << 0) | (1 << 1));
+}
+
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
u8 opcode);
void nvme_execute_passthru_rq(struct request *rq);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a29b170701fc..d3c5086673bc 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -307,13 +307,12 @@ static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
static void nvme_dbbuf_set(struct nvme_dev *dev)
{
- struct nvme_command c;
+ struct nvme_command c = { };
unsigned int i;
if (!dev->dbbuf_dbs)
return;
- memset(&c, 0, sizeof(c));
c.dbbuf.opcode = nvme_admin_dbbuf;
c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);
@@ -536,7 +535,7 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
- if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
+ if (!nvme_ctrl_sgl_supported(&dev->ctrl))
return false;
if (!iod->nvmeq->qid)
return false;
@@ -559,7 +558,6 @@ static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
dma_addr = next_dma_addr;
}
-
}
static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
@@ -576,7 +574,6 @@ static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
dma_addr = next_dma_addr;
}
-
}
static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
@@ -855,7 +852,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
&cmnd->rw, &bv);
if (iod->nvmeq->qid && sgl_threshold &&
- dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
+ nvme_ctrl_sgl_supported(&dev->ctrl))
return nvme_setup_sgl_simple(dev, req,
&cmnd->rw, &bv);
}
@@ -1032,7 +1029,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
- u16 tmp = nvmeq->cq_head + 1;
+ u32 tmp = nvmeq->cq_head + 1;
if (tmp == nvmeq->q_depth) {
nvmeq->cq_head = 0;
@@ -1114,9 +1111,8 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
struct nvme_dev *dev = to_nvme_dev(ctrl);
struct nvme_queue *nvmeq = &dev->queues[0];
- struct nvme_command c;
+ struct nvme_command c = { };
- memset(&c, 0, sizeof(c));
c.common.opcode = nvme_admin_async_event;
c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
nvme_submit_cmd(nvmeq, &c, true);
@@ -1124,9 +1120,8 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
- struct nvme_command c;
+ struct nvme_command c = { };
- memset(&c, 0, sizeof(c));
c.delete_queue.opcode = opcode;
c.delete_queue.qid = cpu_to_le16(id);
@@ -1136,7 +1131,7 @@ static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
struct nvme_queue *nvmeq, s16 vector)
{
- struct nvme_command c;
+ struct nvme_command c = { };
int flags = NVME_QUEUE_PHYS_CONTIG;
if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
@@ -1146,7 +1141,6 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
* Note: we (ab)use the fact that the prp fields survive if no data
* is attached to the request.
*/
- memset(&c, 0, sizeof(c));
c.create_cq.opcode = nvme_admin_create_cq;
c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
c.create_cq.cqid = cpu_to_le16(qid);
@@ -1161,7 +1155,7 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
struct nvme_queue *nvmeq)
{
struct nvme_ctrl *ctrl = &dev->ctrl;
- struct nvme_command c;
+ struct nvme_command c = { };
int flags = NVME_QUEUE_PHYS_CONTIG;
/*
@@ -1176,7 +1170,6 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
* Note: we (ab)use the fact that the prp fields survive if no data
* is attached to the request.
*/
- memset(&c, 0, sizeof(c));
c.create_sq.opcode = nvme_admin_create_sq;
c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
c.create_sq.sqid = cpu_to_le16(qid);
@@ -1257,7 +1250,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
struct nvme_queue *nvmeq = iod->nvmeq;
struct nvme_dev *dev = nvmeq->dev;
struct request *abort_req;
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
u32 csts = readl(dev->bar + NVME_REG_CSTS);
/* If PCI error recovery process is happening, we cannot reset or
@@ -1337,7 +1330,6 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
}
iod->aborted = 1;
- memset(&cmd, 0, sizeof(cmd));
cmd.abort.opcode = nvme_admin_abort_cmd;
cmd.abort.cid = req->tag;
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
@@ -1888,10 +1880,9 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
{
u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT;
u64 dma_addr = dev->host_mem_descs_dma;
- struct nvme_command c;
+ struct nvme_command c = { };
int ret;
- memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_set_features;
c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
c.features.dword11 = cpu_to_le32(bits);
@@ -2265,9 +2256,8 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
{
struct request_queue *q = nvmeq->dev->ctrl.admin_q;
struct request *req;
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
- memset(&cmd, 0, sizeof(cmd));
cmd.delete_queue.opcode = opcode;
cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
@@ -2828,54 +2818,6 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
return 0;
}
-#ifdef CONFIG_ACPI
-static bool nvme_acpi_storage_d3(struct pci_dev *dev)
-{
- struct acpi_device *adev;
- struct pci_dev *root;
- acpi_handle handle;
- acpi_status status;
- u8 val;
-
- /*
- * Look for _DSD property specifying that the storage device on the port
- * must use D3 to support deep platform power savings during
- * suspend-to-idle.
- */
- root = pcie_find_root_port(dev);
- if (!root)
- return false;
-
- adev = ACPI_COMPANION(&root->dev);
- if (!adev)
- return false;
-
- /*
- * The property is defined in the PXSX device for South complex ports
- * and in the PEGP device for North complex ports.
- */
- status = acpi_get_handle(adev->handle, "PXSX", &handle);
- if (ACPI_FAILURE(status)) {
- status = acpi_get_handle(adev->handle, "PEGP", &handle);
- if (ACPI_FAILURE(status))
- return false;
- }
-
- if (acpi_bus_get_device(handle, &adev))
- return false;
-
- if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
- &val))
- return false;
- return val == 1;
-}
-#else
-static inline bool nvme_acpi_storage_d3(struct pci_dev *dev)
-{
- return false;
-}
-#endif /* CONFIG_ACPI */
-
static void nvme_async_probe(void *data, async_cookie_t cookie)
{
struct nvme_dev *dev = data;
@@ -2925,7 +2867,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
quirks |= check_vendor_combination_bug(pdev);
- if (!noacpi && nvme_acpi_storage_d3(pdev)) {
+ if (!noacpi && acpi_storage_d3(&pdev->dev)) {
/*
* Some systems use a bios work around to ask for D3 on
* platforms that support kernel managed suspend.
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 4697a94c0945..a9e70cefd7ed 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1088,7 +1088,7 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
{
- int ret = -EINVAL;
+ int ret;
bool changed;
ret = nvme_rdma_configure_admin_queue(ctrl, new);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 34f4b3402f7c..c7bd37103cf4 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -123,6 +123,7 @@ struct nvme_tcp_ctrl {
struct blk_mq_tag_set admin_tag_set;
struct sockaddr_storage addr;
struct sockaddr_storage src_addr;
+ struct net_device *ndev;
struct nvme_ctrl ctrl;
struct work_struct err_work;
@@ -1455,6 +1456,20 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
}
}
+ if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
+ char *iface = nctrl->opts->host_iface;
+ sockptr_t optval = KERNEL_SOCKPTR(iface);
+
+ ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
+ optval, strlen(iface));
+ if (ret) {
+ dev_err(nctrl->device,
+ "failed to bind to interface %s queue %d err %d\n",
+ iface, qid, ret);
+ goto err_sock;
+ }
+ }
+
queue->hdr_digest = nctrl->opts->hdr_digest;
queue->data_digest = nctrl->opts->data_digest;
if (queue->hdr_digest || queue->data_digest) {
@@ -1973,11 +1988,13 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
return ret;
if (ctrl->icdoff) {
+ ret = -EOPNOTSUPP;
dev_err(ctrl->device, "icdoff is not supported!\n");
goto destroy_admin;
}
- if (!(ctrl->sgls & ((1 << 0) | (1 << 1)))) {
+ if (!nvme_ctrl_sgl_supported(ctrl)) {
+ ret = -EOPNOTSUPP;
dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
goto destroy_admin;
}
@@ -2515,6 +2532,16 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
}
}
+ if (opts->mask & NVMF_OPT_HOST_IFACE) {
+ ctrl->ndev = dev_get_by_name(&init_net, opts->host_iface);
+ if (!ctrl->ndev) {
+ pr_err("invalid interface passed: %s\n",
+ opts->host_iface);
+ ret = -ENODEV;
+ goto out_free_ctrl;
+ }
+ }
+
if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
ret = -EALREADY;
goto out_free_ctrl;
@@ -2571,7 +2598,7 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
- NVMF_OPT_TOS,
+ NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
.create_ctrl = nvme_tcp_create_ctrl,
};
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 475dd45c3db4..d95010481fce 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -171,8 +171,8 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns,
return cb(&zone, idx, data);
}
-static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data)
+int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct nvme_zone_report *report;
struct nvme_command c = { };
@@ -180,6 +180,9 @@ static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
unsigned int nz, i;
size_t buflen;
+ if (ns->head->ids.csi != NVME_CSI_ZNS)
+ return -EINVAL;
+
report = nvme_zns_alloc_report_buffer(ns, nr_zones, &buflen);
if (!report)
return -ENOMEM;
@@ -227,26 +230,6 @@ out_free:
return ret;
}
-int nvme_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data)
-{
- struct nvme_ns_head *head = NULL;
- struct nvme_ns *ns;
- int srcu_idx, ret;
-
- ns = nvme_get_ns_from_disk(disk, &head, &srcu_idx);
- if (unlikely(!ns))
- return -EWOULDBLOCK;
-
- if (ns->head->ids.csi == NVME_CSI_ZNS)
- ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
- else
- ret = -EINVAL;
- nvme_put_ns_from_disk(head, srcu_idx);
-
- return ret;
-}
-
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
struct nvme_command *c, enum nvme_zone_mgmt_action action)
{
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index ebf91fc4c72e..9837e580fa7e 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o
nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
discovery.o io-cmd-file.o io-cmd-bdev.o
nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o
+nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o
nvme-loop-y += loop.o
nvmet-rdma-y += rdma.o
nvmet-fc-y += fc.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index dcd49a72f2f3..0cb98f2bbc8c 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -162,15 +162,8 @@ out:
nvmet_req_complete(req, status);
}
-static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
+static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
- u16 status = NVME_SC_INTERNAL;
- struct nvme_effects_log *log;
-
- log = kzalloc(sizeof(*log), GFP_KERNEL);
- if (!log)
- goto out;
-
log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
@@ -184,9 +177,45 @@ static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);
+}
- status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
+{
+ log->iocs[nvme_cmd_zone_append] = cpu_to_le32(1 << 0);
+ log->iocs[nvme_cmd_zone_mgmt_send] = cpu_to_le32(1 << 0);
+ log->iocs[nvme_cmd_zone_mgmt_recv] = cpu_to_le32(1 << 0);
+}
+
+static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
+{
+ struct nvme_effects_log *log;
+ u16 status = NVME_SC_SUCCESS;
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+ if (!log) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ switch (req->cmd->get_log_page.csi) {
+ case NVME_CSI_NVM:
+ nvmet_get_cmd_effects_nvm(log);
+ break;
+ case NVME_CSI_ZNS:
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ status = NVME_SC_INVALID_IO_CMD_SET;
+ goto free;
+ }
+ nvmet_get_cmd_effects_nvm(log);
+ nvmet_get_cmd_effects_zns(log);
+ break;
+ default:
+ status = NVME_SC_INVALID_LOG_PAGE;
+ goto free;
+ }
+
+ status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+free:
kfree(log);
out:
nvmet_req_complete(req, status);
@@ -313,22 +342,6 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}
-static u16 nvmet_set_model_number(struct nvmet_subsys *subsys)
-{
- u16 status = 0;
-
- mutex_lock(&subsys->lock);
- if (!subsys->model_number) {
- subsys->model_number =
- kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
- if (!subsys->model_number)
- status = NVME_SC_INTERNAL;
- }
- mutex_unlock(&subsys->lock);
-
- return status;
-}
-
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -337,14 +350,10 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
u32 cmd_capsule_size;
u16 status = 0;
- /*
- * If there is no model number yet, set it now. It will then remain
- * stable for the life time of the subsystem.
- */
- if (!subsys->model_number) {
- status = nvmet_set_model_number(subsys);
- if (status)
- goto out;
+ if (!subsys->subsys_discovered) {
+ mutex_lock(&subsys->lock);
+ subsys->subsys_discovered = true;
+ mutex_unlock(&subsys->lock);
}
id = kzalloc(sizeof(*id), GFP_KERNEL);
@@ -357,9 +366,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->vid = 0;
id->ssvid = 0;
- memset(id->sn, ' ', sizeof(id->sn));
- bin2hex(id->sn, &ctrl->subsys->serial,
- min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
+ memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
strlen(subsys->model_number), ' ');
memcpy_and_pad(id->fr, sizeof(id->fr),
@@ -415,7 +422,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
- id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
+ id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
NVME_CTRL_ONCS_WRITE_ZEROES);
@@ -635,6 +642,12 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req)
goto out;
}
+ status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
+ NVME_NIDT_CSI_LEN,
+ &req->ns->csi, &off);
+ if (status)
+ goto out;
+
if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
off) != NVME_IDENTIFY_DATA_SIZE - off)
status = NVME_SC_INTERNAL | NVME_SC_DNR;
@@ -643,6 +656,23 @@ out:
nvmet_req_complete(req, status);
}
+static bool nvmet_handle_identify_desclist(struct nvmet_req *req)
+{
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_NVM:
+ nvmet_execute_identify_desclist(req);
+ return true;
+ case NVME_CSI_ZNS:
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ nvmet_execute_identify_desclist(req);
+ return true;
+ }
+ return false;
+ default:
+ return false;
+ }
+}
+
static void nvmet_execute_identify(struct nvmet_req *req)
{
if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
@@ -650,19 +680,54 @@ static void nvmet_execute_identify(struct nvmet_req *req)
switch (req->cmd->identify.cns) {
case NVME_ID_CNS_NS:
- return nvmet_execute_identify_ns(req);
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_NVM:
+ return nvmet_execute_identify_ns(req);
+ default:
+ break;
+ }
+ break;
+ case NVME_ID_CNS_CS_NS:
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_ZNS:
+ return nvmet_execute_identify_cns_cs_ns(req);
+ default:
+ break;
+ }
+ }
+ break;
case NVME_ID_CNS_CTRL:
- return nvmet_execute_identify_ctrl(req);
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_NVM:
+ return nvmet_execute_identify_ctrl(req);
+ }
+ break;
+ case NVME_ID_CNS_CS_CTRL:
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_ZNS:
+ return nvmet_execute_identify_cns_cs_ctrl(req);
+ default:
+ break;
+ }
+ }
+ break;
case NVME_ID_CNS_NS_ACTIVE_LIST:
- return nvmet_execute_identify_nslist(req);
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_NVM:
+ return nvmet_execute_identify_nslist(req);
+ default:
+ break;
+ }
+ break;
case NVME_ID_CNS_NS_DESC_LIST:
- return nvmet_execute_identify_desclist(req);
+ if (nvmet_handle_identify_desclist(req) == true)
+ return;
+ break;
}
- pr_debug("unhandled identify cns %d on qid %d\n",
- req->cmd->identify.cns, req->sq->qid);
- req->error_loc = offsetof(struct nvme_identify, cns);
- nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+ nvmet_req_cns_error_complete(req);
}
/*
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 65a0cf99f557..273555127188 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1007,13 +1007,26 @@ static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
NVME_MINOR(subsys->ver));
}
-static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
- const char *page, size_t count)
+static ssize_t
+nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
+ const char *page, size_t count)
{
- struct nvmet_subsys *subsys = to_subsys(item);
int major, minor, tertiary = 0;
int ret;
+ if (subsys->subsys_discovered) {
+ if (NVME_TERTIARY(subsys->ver))
+ pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
+ NVME_MAJOR(subsys->ver),
+ NVME_MINOR(subsys->ver),
+ NVME_TERTIARY(subsys->ver));
+ else
+ pr_err("Can't set version number. %llu.%llu is already assigned\n",
+ NVME_MAJOR(subsys->ver),
+ NVME_MINOR(subsys->ver));
+ return -EINVAL;
+ }
+
/* passthru subsystems use the underlying controller's version */
if (nvmet_passthru_ctrl(subsys))
return -EINVAL;
@@ -1022,35 +1035,84 @@ static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
if (ret != 2 && ret != 3)
return -EINVAL;
- down_write(&nvmet_config_sem);
subsys->ver = NVME_VS(major, minor, tertiary);
- up_write(&nvmet_config_sem);
return count;
}
+
+static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ ssize_t ret;
+
+ down_write(&nvmet_config_sem);
+ mutex_lock(&subsys->lock);
+ ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
+ mutex_unlock(&subsys->lock);
+ up_write(&nvmet_config_sem);
+
+ return ret;
+}
CONFIGFS_ATTR(nvmet_subsys_, attr_version);
+/* See Section 1.5 of NVMe 1.4 */
+static bool nvmet_is_ascii(const char c)
+{
+ return c >= 0x20 && c <= 0x7e;
+}
+
static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
char *page)
{
struct nvmet_subsys *subsys = to_subsys(item);
- return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial);
+ return snprintf(page, PAGE_SIZE, "%s\n", subsys->serial);
}
-static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
- const char *page, size_t count)
+static ssize_t
+nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
+ const char *page, size_t count)
{
- u64 serial;
+ int pos, len = strcspn(page, "\n");
- if (sscanf(page, "%llx\n", &serial) != 1)
+ if (subsys->subsys_discovered) {
+ pr_err("Can't set serial number. %s is already assigned\n",
+ subsys->serial);
return -EINVAL;
+ }
+
+ if (!len || len > NVMET_SN_MAX_SIZE) {
+ pr_err("Serial Number can not be empty or exceed %d Bytes\n",
+ NVMET_SN_MAX_SIZE);
+ return -EINVAL;
+ }
+
+ for (pos = 0; pos < len; pos++) {
+ if (!nvmet_is_ascii(page[pos])) {
+ pr_err("Serial Number must contain only ASCII strings\n");
+ return -EINVAL;
+ }
+ }
+
+ memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');
+
+ return count;
+}
+
+static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ ssize_t ret;
down_write(&nvmet_config_sem);
- to_subsys(item)->serial = serial;
+ mutex_lock(&subsys->lock);
+ ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
+ mutex_unlock(&subsys->lock);
up_write(&nvmet_config_sem);
- return count;
+ return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
@@ -1118,20 +1180,8 @@ static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
char *page)
{
struct nvmet_subsys *subsys = to_subsys(item);
- int ret;
-
- mutex_lock(&subsys->lock);
- ret = snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number ?
- subsys->model_number : NVMET_DEFAULT_CTRL_MODEL);
- mutex_unlock(&subsys->lock);
-
- return ret;
-}
-/* See Section 1.5 of NVMe 1.4 */
-static bool nvmet_is_ascii(const char c)
-{
- return c >= 0x20 && c <= 0x7e;
+ return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
}
static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
@@ -1139,7 +1189,7 @@ static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
{
int pos = 0, len;
- if (subsys->model_number) {
+ if (subsys->subsys_discovered) {
pr_err("Can't set model number. %s is already assigned\n",
subsys->model_number);
return -EINVAL;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b20b8d0a1144..ac7210a3ea1c 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -16,6 +16,7 @@
#include "nvmet.h"
struct workqueue_struct *buffered_io_wq;
+struct workqueue_struct *zbd_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);
@@ -43,43 +44,34 @@ DECLARE_RWSEM(nvmet_ana_sem);
inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{
- u16 status;
-
switch (errno) {
case 0:
- status = NVME_SC_SUCCESS;
- break;
+ return NVME_SC_SUCCESS;
case -ENOSPC:
req->error_loc = offsetof(struct nvme_rw_command, length);
- status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
- break;
+ return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
case -EREMOTEIO:
req->error_loc = offsetof(struct nvme_rw_command, slba);
- status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
- break;
+ return NVME_SC_LBA_RANGE | NVME_SC_DNR;
case -EOPNOTSUPP:
req->error_loc = offsetof(struct nvme_common_command, opcode);
switch (req->cmd->common.opcode) {
case nvme_cmd_dsm:
case nvme_cmd_write_zeroes:
- status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
- break;
+ return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
default:
- status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
break;
case -ENODATA:
req->error_loc = offsetof(struct nvme_rw_command, nsid);
- status = NVME_SC_ACCESS_DENIED;
- break;
+ return NVME_SC_ACCESS_DENIED;
case -EIO:
fallthrough;
default:
req->error_loc = offsetof(struct nvme_common_command, opcode);
- status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ return NVME_SC_INTERNAL | NVME_SC_DNR;
}
-
- return status;
}
u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
@@ -122,11 +114,11 @@ u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
return 0;
}
-static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
+static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
{
- unsigned long nsid = 0;
struct nvmet_ns *cur;
unsigned long idx;
+ u32 nsid = 0;
xa_for_each(&subsys->namespaces, idx, cur)
nsid = cur->nsid;
@@ -141,14 +133,13 @@ static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
{
- u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
struct nvmet_req *req;
mutex_lock(&ctrl->lock);
while (ctrl->nr_async_event_cmds) {
req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
mutex_unlock(&ctrl->lock);
- nvmet_req_complete(req, status);
+ nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
mutex_lock(&ctrl->lock);
}
mutex_unlock(&ctrl->lock);
@@ -412,7 +403,6 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
pr_debug("ctrl %d start keep-alive timer for %d secs\n",
ctrl->cntlid, ctrl->kato);
- INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
@@ -693,6 +683,7 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
uuid_gen(&ns->uuid);
ns->buffered_io = false;
+ ns->csi = NVME_CSI_NVM;
return ns;
}
@@ -895,10 +886,18 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
return ret;
}
- if (req->ns->file)
- return nvmet_file_parse_io_cmd(req);
-
- return nvmet_bdev_parse_io_cmd(req);
+ switch (req->ns->csi) {
+ case NVME_CSI_NVM:
+ if (req->ns->file)
+ return nvmet_file_parse_io_cmd(req);
+ return nvmet_bdev_parse_io_cmd(req);
+ case NVME_CSI_ZNS:
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
+ return nvmet_bdev_zns_parse_io_cmd(req);
+ return NVME_SC_INVALID_IO_CMD_SET;
+ default:
+ return NVME_SC_INVALID_IO_CMD_SET;
+ }
}
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
@@ -1119,6 +1118,17 @@ static inline u8 nvmet_cc_iocqes(u32 cc)
return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}
+static inline bool nvmet_css_supported(u8 cc_css)
+{
+ switch (cc_css <<= NVME_CC_CSS_SHIFT) {
+ case NVME_CC_CSS_NVM:
+ case NVME_CC_CSS_CSI:
+ return true;
+ default:
+ return false;
+ }
+}
+
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
lockdep_assert_held(&ctrl->lock);
@@ -1138,7 +1148,7 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
if (nvmet_cc_mps(ctrl->cc) != 0 ||
nvmet_cc_ams(ctrl->cc) != 0 ||
- nvmet_cc_css(ctrl->cc) != 0) {
+ !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) {
ctrl->csts = NVME_CSTS_CFS;
return;
}
@@ -1189,6 +1199,8 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
/* command sets supported: NVMe command set: */
ctrl->cap = (1ULL << 37);
+ /* Controller supports one or more I/O Command Sets */
+ ctrl->cap |= (1ULL << 43);
/* CC.EN timeout in 500msec units: */
ctrl->cap |= (15ULL << 24);
/* maximum queue entries supported: */
@@ -1358,6 +1370,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
INIT_LIST_HEAD(&ctrl->async_events);
INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
+ INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
@@ -1499,6 +1512,8 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
enum nvme_subsys_type type)
{
struct nvmet_subsys *subsys;
+ char serial[NVMET_SN_MAX_SIZE / 2];
+ int ret;
subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
if (!subsys)
@@ -1506,7 +1521,14 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
subsys->ver = NVMET_DEFAULT_VS;
/* generate a random serial number as our controllers are ephemeral: */
- get_random_bytes(&subsys->serial, sizeof(subsys->serial));
+ get_random_bytes(&serial, sizeof(serial));
+ bin2hex(subsys->serial, &serial, sizeof(serial));
+
+ subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
+ if (!subsys->model_number) {
+ ret = -ENOMEM;
+ goto free_subsys;
+ }
switch (type) {
case NVME_NQN_NVME:
@@ -1517,15 +1539,15 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
break;
default:
pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
- kfree(subsys);
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto free_mn;
}
subsys->type = type;
subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
GFP_KERNEL);
if (!subsys->subsysnqn) {
- kfree(subsys);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto free_mn;
}
subsys->cntlid_min = NVME_CNTLID_MIN;
subsys->cntlid_max = NVME_CNTLID_MAX;
@@ -1537,6 +1559,12 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
INIT_LIST_HEAD(&subsys->hosts);
return subsys;
+
+free_mn:
+ kfree(subsys->model_number);
+free_subsys:
+ kfree(subsys);
+ return ERR_PTR(ret);
}
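The serial-number change above sizes its scratch buffer at NVMET_SN_MAX_SIZE / 2 because bin2hex() emits two ASCII characters per input byte; a sketch of the arithmetic, illustrative only and not part of the patch:

/* Illustrative only: with NVMET_SN_MAX_SIZE == 20,
 *   get_random_bytes(serial, 10)              -> 10 raw random bytes
 *   bin2hex(subsys->serial, serial, 10)       -> exactly 20 hex digits,
 * which fills the 20-character serial field with no truncation or padding.
 */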
static void nvmet_subsys_free(struct kref *ref)
@@ -1575,11 +1603,15 @@ static int __init nvmet_init(void)
nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
+ zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
+ if (!zbd_wq)
+ return -ENOMEM;
+
buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
WQ_MEM_RECLAIM, 0);
if (!buffered_io_wq) {
error = -ENOMEM;
- goto out;
+ goto out_free_zbd_work_queue;
}
error = nvmet_init_discovery();
@@ -1595,7 +1627,8 @@ out_exit_discovery:
nvmet_exit_discovery();
out_free_work_queue:
destroy_workqueue(buffered_io_wq);
-out:
+out_free_zbd_work_queue:
+ destroy_workqueue(zbd_wq);
return error;
}
@@ -1605,6 +1638,7 @@ static void __exit nvmet_exit(void)
nvmet_exit_discovery();
ida_destroy(&cntlid_ida);
destroy_workqueue(buffered_io_wq);
+ destroy_workqueue(zbd_wq);
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index fc3645fc2c24..7aa62bc6ae84 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -244,7 +244,6 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_id_ctrl *id;
- const char model[] = "Linux";
u16 status = 0;
if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
@@ -262,11 +261,10 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
goto out;
}
- memset(id->sn, ' ', sizeof(id->sn));
- bin2hex(id->sn, &ctrl->subsys->serial,
- min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
+ memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
memset(id->fr, ' ', sizeof(id->fr));
- memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
+ memcpy_and_pad(id->mn, sizeof(id->mn), ctrl->subsys->model_number,
+ strlen(ctrl->subsys->model_number), ' ');
memcpy_and_pad(id->fr, sizeof(id->fr),
UTS_RELEASE, strlen(UTS_RELEASE), ' ');
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 19e113240fff..22b5108168a6 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -2511,13 +2511,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
int ret;
/*
- * if there is no nvmet mapping to the targetport there
- * shouldn't be requests. just terminate them.
- */
- if (!tgtport->pe)
- goto transport_error;
-
- /*
* Fused commands are currently not supported in the linux
* implementation.
*
@@ -2544,7 +2537,8 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
fod->req.cmd = &fod->cmdiubuf.sqe;
fod->req.cqe = &fod->rspiubuf.cqe;
- fod->req.port = tgtport->pe->port;
+ if (tgtport->pe)
+ fod->req.port = tgtport->pe->port;
/* clear any response payload */
memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 429263ca9b97..0fc2781ab970 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -47,6 +47,14 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
id->nows = to0based(ql->io_opt / ql->logical_block_size);
}
+void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
+{
+ if (ns->bdev) {
+ blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
+ ns->bdev = NULL;
+ }
+}
+
static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
{
struct blk_integrity *bi = bdev_get_integrity(ns->bdev);
@@ -86,15 +94,15 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10))
nvmet_bdev_ns_enable_integrity(ns);
- return 0;
-}
-
-void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
-{
- if (ns->bdev) {
- blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
- ns->bdev = NULL;
+ if (bdev_is_zoned(ns->bdev)) {
+ if (!nvmet_bdev_zns_enable(ns)) {
+ nvmet_bdev_ns_disable(ns);
+ return -EINVAL;
+ }
+ ns->csi = NVME_CSI_ZNS;
}
+
+ return 0;
}
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
@@ -102,7 +110,7 @@ void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
ns->size = i_size_read(ns->bdev->bd_inode);
}
-static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
+u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
{
u16 status = NVME_SC_SUCCESS;
@@ -164,8 +172,7 @@ static void nvmet_bio_done(struct bio *bio)
struct nvmet_req *req = bio->bi_private;
nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
- if (bio != &req->b.inline_bio)
- bio_put(bio);
+ nvmet_req_bio_put(req, bio);
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
@@ -174,11 +181,10 @@ static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
{
struct blk_integrity *bi;
struct bio_integrity_payload *bip;
- struct block_device *bdev = req->ns->bdev;
int rc;
size_t resid, len;
- bi = bdev_get_integrity(bdev);
+ bi = bdev_get_integrity(req->ns->bdev);
if (unlikely(!bi)) {
pr_err("Unable to locate bio_integrity\n");
return -ENODEV;
@@ -430,9 +436,7 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
- struct nvme_command *cmd = req->cmd;
-
- switch (cmd->common.opcode) {
+ switch (req->cmd->common.opcode) {
case nvme_cmd_read:
case nvme_cmd_write:
req->execute = nvmet_bdev_execute_rw;
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 7fdbdc496597..1dd1a0fe2e81 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -385,9 +385,7 @@ static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
- struct nvme_command *cmd = req->cmd;
-
- switch (cmd->common.opcode) {
+ switch (req->cmd->common.opcode) {
case nvme_cmd_read:
case nvme_cmd_write:
req->execute = nvmet_file_execute_rw;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 53aea9a8056e..06dd3d537f07 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -28,6 +28,7 @@
#define NVMET_NO_ERROR_LOC ((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL "Linux"
#define NVMET_MN_MAX_SIZE 40
+#define NVMET_SN_MAX_SIZE 20
/*
* Supported optional AENs:
@@ -82,6 +83,7 @@ struct nvmet_ns {
struct pci_dev *p2p_dev;
int pi_type;
int metadata_size;
+ u8 csi;
};
static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
@@ -217,7 +219,7 @@ struct nvmet_subsys {
struct xarray namespaces;
unsigned int nr_namespaces;
- unsigned int max_nsid;
+ u32 max_nsid;
u16 cntlid_min;
u16 cntlid_max;
@@ -229,7 +231,8 @@ struct nvmet_subsys {
u16 max_qid;
u64 ver;
- u64 serial;
+ char serial[NVMET_SN_MAX_SIZE];
+ bool subsys_discovered;
char *subsysnqn;
bool pi_support;
@@ -247,6 +250,10 @@ struct nvmet_subsys {
unsigned int admin_timeout;
unsigned int io_timeout;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ u8 zasl;
+#endif /* CONFIG_BLK_DEV_ZONED */
};
static inline struct nvmet_subsys *to_subsys(struct config_item *item)
@@ -332,6 +339,12 @@ struct nvmet_req {
struct work_struct work;
bool use_workqueue;
} p;
+#ifdef CONFIG_BLK_DEV_ZONED
+ struct {
+ struct bio inline_bio;
+ struct work_struct zmgmt_work;
+ } z;
+#endif /* CONFIG_BLK_DEV_ZONED */
};
int sg_cnt;
int metadata_sg_cnt;
@@ -351,6 +364,7 @@ struct nvmet_req {
};
extern struct workqueue_struct *buffered_io_wq;
+extern struct workqueue_struct *zbd_wq;
static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
@@ -400,6 +414,7 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
+u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
@@ -527,6 +542,14 @@ void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
int nvmet_file_ns_revalidate(struct nvmet_ns *ns);
void nvmet_ns_revalidate(struct nvmet_ns *ns);
+u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);
+
+bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
+void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req);
+void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req);
+void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
+void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
+void nvmet_bdev_execute_zone_append(struct nvmet_req *req);
static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
@@ -622,4 +645,18 @@ static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}
+static inline void nvmet_req_cns_error_complete(struct nvmet_req *req)
+{
+ pr_debug("unhandled identify cns %d on qid %d\n",
+ req->cmd->identify.cns, req->sq->qid);
+ req->error_loc = offsetof(struct nvme_identify, cns);
+ nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+}
+
+static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
+{
+ if (bio != &req->b.inline_bio)
+ bio_put(bio);
+}
+
#endif /* _NVMET_H */
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 39b1473f7204..fced52de33ce 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -206,8 +206,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
for_each_sg(req->sg, sg, req->sg_cnt, i) {
if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
sg->offset) < sg->length) {
- if (bio != &req->p.inline_bio)
- bio_put(bio);
+ nvmet_req_bio_put(req, bio);
return -EINVAL;
}
}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 7d607f435e36..891174ccd44b 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1257,7 +1257,7 @@ out_err:
static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
- struct ib_qp_init_attr qp_attr;
+ struct ib_qp_init_attr qp_attr = { };
struct nvmet_rdma_device *ndev = queue->dev;
int nr_cqe, ret, i, factor;
@@ -1275,7 +1275,6 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
goto out;
}
- memset(&qp_attr, 0, sizeof(qp_attr));
qp_attr.qp_context = queue;
qp_attr.event_handler = nvmet_rdma_qp_event;
qp_attr.send_cq = queue->cq;
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
new file mode 100644
index 000000000000..17f8b7a45f21
--- /dev/null
+++ b/drivers/nvme/target/zns.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe ZNS-ZBD command implementation.
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/nvme.h>
+#include <linux/blkdev.h>
+#include "nvmet.h"
+
+/*
+ * We set the Memory Page Size Minimum (MPSMIN) for the target controller to
+ * 0, to which nvme_enable_ctrl() adds 12, resulting in a page_shift value of
+ * 12 (2^12 = 4k). Use the same shift of 12 when calculating the ZASL.
+ */
+#define NVMET_MPSMIN_SHIFT 12
+
+static inline u8 nvmet_zasl(unsigned int zone_append_sects)
+{
+ /*
+ * Zone Append Size Limit (zasl) is expressed as a power of 2 value
+ * with the minimum memory page size (i.e. 2^12 bytes) as the unit.
+ */
+ return ilog2(zone_append_sects >> (NVMET_MPSMIN_SHIFT - 9));
+}
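A quick worked example of the ZASL arithmetic above; the device limit is hypothetical, not taken from the patch:

/* Illustrative only: assuming 512-byte sectors and a device that allows
 * 1024 sectors (512 KiB) per zone append:
 *
 *   1024 >> (NVMET_MPSMIN_SHIFT - 9) = 128    units of 4 KiB pages
 *   ilog2(128)                       = 7      reported as ZASL
 *
 * and the host recovers 2^7 * 4 KiB = 512 KiB as its zone append limit.
 */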
+
+static int validate_conv_zones_cb(struct blk_zone *z,
+ unsigned int i, void *data)
+{
+ if (z->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
+{
+ struct request_queue *q = ns->bdev->bd_disk->queue;
+ u8 zasl = nvmet_zasl(queue_max_zone_append_sectors(q));
+ struct gendisk *bd_disk = ns->bdev->bd_disk;
+ int ret;
+
+ if (ns->subsys->zasl) {
+ if (ns->subsys->zasl > zasl)
+ return false;
+ }
+ ns->subsys->zasl = zasl;
+
+ /*
+ * Generic zoned block devices may have a smaller last zone which is
+ * not supported by ZNS. Exclude zoned drives that have such a smaller
+ * last zone.
+ */
+ if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
+ return false;
+ /*
+ * ZNS does not define a conventional zone type. If the underlying
+ * device has a bitmap set indicating the existence of conventional
+ * zones, reject the device. Otherwise, use report zones to detect if
+ * the device has conventional zones.
+ */
+ if (ns->bdev->bd_disk->queue->conv_zones_bitmap)
+ return false;
+
+ ret = blkdev_report_zones(ns->bdev, 0, blkdev_nr_zones(bd_disk),
+ validate_conv_zones_cb, NULL);
+ if (ret < 0)
+ return false;
+
+ ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
+
+ return true;
+}
+
+void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req)
+{
+ u8 zasl = req->sq->ctrl->subsys->zasl;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_id_ctrl_zns *id;
+ u16 status;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ if (ctrl->ops->get_mdts)
+ id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl);
+ else
+ id->zasl = zasl;
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
+ kfree(id);
+out:
+ nvmet_req_complete(req, status);
+}
+
+void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
+{
+ struct nvme_id_ns_zns *id_zns;
+ u64 zsze;
+ u16 status;
+
+ if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
+ req->error_loc = offsetof(struct nvme_identify, nsid);
+ status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ goto out;
+ }
+
+ id_zns = kzalloc(sizeof(*id_zns), GFP_KERNEL);
+ if (!id_zns) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ status = nvmet_req_find_ns(req);
+ if (status) {
+ status = NVME_SC_INTERNAL;
+ goto done;
+ }
+
+ if (!bdev_is_zoned(req->ns->bdev)) {
+ req->error_loc = offsetof(struct nvme_identify, nsid);
+ status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ goto done;
+ }
+
+ nvmet_ns_revalidate(req->ns);
+ zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
+ req->ns->blksize_shift;
+ id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
+ id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev));
+ id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev));
+
+done:
+ status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
+ kfree(id_zns);
+out:
+ nvmet_req_complete(req, status);
+}
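For the zsze conversion above, a hedged example with made-up geometry:

/* Illustrative only: for a zone of 524288 512-byte sectors (256 MiB) on a
 * namespace formatted with 4 KiB logical blocks (blksize_shift == 12):
 *
 *   (524288 << 9) >> 12 = 65536 logical blocks per zone,
 *
 * which is the value reported in lbafe[0].zsze.
 */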
+
+static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
+{
+ sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
+ u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
+
+ if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
+ return NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ }
+
+ if (out_bufsize < sizeof(struct nvme_zone_report)) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ switch (req->cmd->zmr.pr) {
+ case 0:
+ case 1:
+ break;
+ default:
+ req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ switch (req->cmd->zmr.zrasf) {
+ case NVME_ZRASF_ZONE_REPORT_ALL:
+ case NVME_ZRASF_ZONE_STATE_EMPTY:
+ case NVME_ZRASF_ZONE_STATE_IMP_OPEN:
+ case NVME_ZRASF_ZONE_STATE_EXP_OPEN:
+ case NVME_ZRASF_ZONE_STATE_CLOSED:
+ case NVME_ZRASF_ZONE_STATE_FULL:
+ case NVME_ZRASF_ZONE_STATE_READONLY:
+ case NVME_ZRASF_ZONE_STATE_OFFLINE:
+ break;
+ default:
+ req->error_loc =
+ offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ return NVME_SC_SUCCESS;
+}
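The out_bufsize computation above follows the NVMe convention that NUMD is a 0-based dword count; a hedged example with a hypothetical host buffer:

/* Illustrative only: a host requesting a 4 KiB zone report sets
 * numd = 1023, so
 *
 *   out_bufsize = (1023 + 1) << 2 = 4096 bytes,
 *
 * comfortably larger than the 64-byte struct nvme_zone_report header
 * required by the check above.
 */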
+
+struct nvmet_report_zone_data {
+ struct nvmet_req *req;
+ u64 out_buf_offset;
+ u64 out_nr_zones;
+ u64 nr_zones;
+ u8 zrasf;
+};
+
+static int nvmet_bdev_report_zone_cb(struct blk_zone *z, unsigned i, void *d)
+{
+ static const unsigned int nvme_zrasf_to_blk_zcond[] = {
+ [NVME_ZRASF_ZONE_STATE_EMPTY] = BLK_ZONE_COND_EMPTY,
+ [NVME_ZRASF_ZONE_STATE_IMP_OPEN] = BLK_ZONE_COND_IMP_OPEN,
+ [NVME_ZRASF_ZONE_STATE_EXP_OPEN] = BLK_ZONE_COND_EXP_OPEN,
+ [NVME_ZRASF_ZONE_STATE_CLOSED] = BLK_ZONE_COND_CLOSED,
+ [NVME_ZRASF_ZONE_STATE_READONLY] = BLK_ZONE_COND_READONLY,
+ [NVME_ZRASF_ZONE_STATE_FULL] = BLK_ZONE_COND_FULL,
+ [NVME_ZRASF_ZONE_STATE_OFFLINE] = BLK_ZONE_COND_OFFLINE,
+ };
+ struct nvmet_report_zone_data *rz = d;
+
+ if (rz->zrasf != NVME_ZRASF_ZONE_REPORT_ALL &&
+ z->cond != nvme_zrasf_to_blk_zcond[rz->zrasf])
+ return 0;
+
+ if (rz->nr_zones < rz->out_nr_zones) {
+ struct nvme_zone_descriptor zdesc = { };
+ u16 status;
+
+ zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity);
+ zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start);
+ zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp);
+ zdesc.za = z->reset ? 1 << 2 : 0;
+ zdesc.zs = z->cond << 4;
+ zdesc.zt = z->type;
+
+ status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc,
+ sizeof(zdesc));
+ if (status)
+ return -EINVAL;
+
+ rz->out_buf_offset += sizeof(zdesc);
+ }
+
+ rz->nr_zones++;
+
+ return 0;
+}
+
+static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
+{
+ unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
+
+ return blkdev_nr_zones(req->ns->bdev->bd_disk) -
+ (sect >> ilog2(bdev_zone_sectors(req->ns->bdev)));
+}
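A small worked example for the zone-count calculation above, with hypothetical geometry:

/* Illustrative only: with 524288-sector zones and 100 zones total, an
 * SLBA that maps to sector 10485760 lies in zone
 *
 *   10485760 >> ilog2(524288) = 20,
 *
 * so 100 - 20 = 80 zones remain to be reported starting from that zone.
 */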
+
+static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize)
+{
+ if (bufsize <= sizeof(struct nvme_zone_report))
+ return 0;
+
+ return (bufsize - sizeof(struct nvme_zone_report)) /
+ sizeof(struct nvme_zone_descriptor);
+}
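And for the buffer-capacity helper above, continuing the same hypothetical 4 KiB report buffer:

/* Illustrative only: with a 4096-byte buffer,
 *
 *   (4096 - sizeof(struct nvme_zone_report)) /
 *           sizeof(struct nvme_zone_descriptor) = (4096 - 64) / 64 = 63,
 *
 * i.e. at most 63 zone descriptors can follow the report header.
 */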
+
+static void nvmet_bdev_zone_zmgmt_recv_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
+ sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
+ unsigned long req_slba_nr_zones = nvmet_req_nr_zones_from_slba(req);
+ u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
+ __le64 nr_zones;
+ u16 status;
+ int ret;
+ struct nvmet_report_zone_data rz_data = {
+ .out_nr_zones = get_nr_zones_from_buf(req, out_bufsize),
+ /* leave space for the report zone header */
+ .out_buf_offset = sizeof(struct nvme_zone_report),
+ .zrasf = req->cmd->zmr.zrasf,
+ .nr_zones = 0,
+ .req = req,
+ };
+
+ status = nvmet_bdev_validate_zone_mgmt_recv(req);
+ if (status)
+ goto out;
+
+ if (!req_slba_nr_zones) {
+ status = NVME_SC_SUCCESS;
+ goto out;
+ }
+
+ ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones,
+ nvmet_bdev_report_zone_cb, &rz_data);
+ if (ret < 0) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ /*
+ * When the partial bit is set, nr_zones must indicate the number of zone
+ * descriptors actually transferred.
+ */
+ if (req->cmd->zmr.pr)
+ rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones);
+
+ nr_zones = cpu_to_le64(rz_data.nr_zones);
+ status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones));
+
+out:
+ nvmet_req_complete(req, status);
+}
+
+void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req)
+{
+ INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work);
+ queue_work(zbd_wq, &req->z.zmgmt_work);
+}
+
+static inline enum req_opf zsa_req_op(u8 zsa)
+{
+ switch (zsa) {
+ case NVME_ZONE_OPEN:
+ return REQ_OP_ZONE_OPEN;
+ case NVME_ZONE_CLOSE:
+ return REQ_OP_ZONE_CLOSE;
+ case NVME_ZONE_FINISH:
+ return REQ_OP_ZONE_FINISH;
+ case NVME_ZONE_RESET:
+ return REQ_OP_ZONE_RESET;
+ default:
+ return REQ_OP_LAST;
+ }
+}
+
+static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret)
+{
+ switch (ret) {
+ case 0:
+ return NVME_SC_SUCCESS;
+ case -EINVAL:
+ case -EIO:
+ return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
+ default:
+ return NVME_SC_INTERNAL;
+ }
+}
+
+struct nvmet_zone_mgmt_send_all_data {
+ unsigned long *zbitmap;
+ struct nvmet_req *req;
+};
+
+static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d)
+{
+ struct nvmet_zone_mgmt_send_all_data *data = d;
+
+ switch (zsa_req_op(data->req->cmd->zms.zsa)) {
+ case REQ_OP_ZONE_OPEN:
+ switch (z->cond) {
+ case BLK_ZONE_COND_CLOSED:
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case REQ_OP_ZONE_CLOSE:
+ switch (z->cond) {
+ case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case REQ_OP_ZONE_FINISH:
+ switch (z->cond) {
+ case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ case BLK_ZONE_COND_CLOSED:
+ break;
+ default:
+ return 0;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ set_bit(i, data->zbitmap);
+
+ return 0;
+}
+
+static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
+{
+ struct block_device *bdev = req->ns->bdev;
+ unsigned int nr_zones = blkdev_nr_zones(bdev->bd_disk);
+ struct request_queue *q = bdev_get_queue(bdev);
+ struct bio *bio = NULL;
+ sector_t sector = 0;
+ int ret;
+ struct nvmet_zone_mgmt_send_all_data d = {
+ .req = req,
+ };
+
+ d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)),
+ GFP_NOIO, q->node);
+ if (!d.zbitmap) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Scan and build bitmap of the eligible zones */
+ ret = blkdev_report_zones(bdev, 0, nr_zones, zmgmt_send_scan_cb, &d);
+ if (ret != nr_zones) {
+ if (ret > 0)
+ ret = -EIO;
+ goto out;
+ } else {
+ /* We scanned all the zones */
+ ret = 0;
+ }
+
+ while (sector < get_capacity(bdev->bd_disk)) {
+ if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) {
+ bio = blk_next_bio(bio, 0, GFP_KERNEL);
+ bio->bi_opf = zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC;
+ bio->bi_iter.bi_sector = sector;
+ bio_set_dev(bio, bdev);
+ /* This may take a while, so be nice to others */
+ cond_resched();
+ }
+ sector += blk_queue_zone_sectors(q);
+ }
+
+ if (bio) {
+ ret = submit_bio_wait(bio);
+ bio_put(bio);
+ }
+
+out:
+ kfree(d.zbitmap);
+
+ return blkdev_zone_mgmt_errno_to_nvme_status(ret);
+}
+
+static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
+{
+ int ret;
+
+ switch (zsa_req_op(req->cmd->zms.zsa)) {
+ case REQ_OP_ZONE_RESET:
+ ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
+ get_capacity(req->ns->bdev->bd_disk),
+ GFP_KERNEL);
+ if (ret < 0)
+ return blkdev_zone_mgmt_errno_to_nvme_status(ret);
+ break;
+ case REQ_OP_ZONE_OPEN:
+ case REQ_OP_ZONE_CLOSE:
+ case REQ_OP_ZONE_FINISH:
+ return nvmet_bdev_zone_mgmt_emulate_all(req);
+ default:
+ /* this is needed to quiet a compiler warning */
+ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ return NVME_SC_SUCCESS;
+}
+
+static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
+ sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
+ enum req_opf op = zsa_req_op(req->cmd->zms.zsa);
+ struct block_device *bdev = req->ns->bdev;
+ sector_t zone_sectors = bdev_zone_sectors(bdev);
+ u16 status = NVME_SC_SUCCESS;
+ int ret;
+
+ if (op == REQ_OP_LAST) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
+ status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
+ goto out;
+ }
+
+ /* when the select all bit is set, the slba field is ignored */
+ if (req->cmd->zms.select_all) {
+ status = nvmet_bdev_execute_zmgmt_send_all(req);
+ goto out;
+ }
+
+ if (sect >= get_capacity(bdev->bd_disk)) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
+ status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ goto out;
+ }
+
+ if (sect & (zone_sectors - 1)) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out;
+ }
+
+ ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL);
+ if (ret < 0)
+ status = blkdev_zone_mgmt_errno_to_nvme_status(ret);
+
+out:
+ nvmet_req_complete(req, status);
+}
+
+void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req)
+{
+ INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work);
+ queue_work(zbd_wq, &req->z.zmgmt_work);
+}
+
+static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
+{
+ struct nvmet_req *req = bio->bi_private;
+
+ if (bio->bi_status == BLK_STS_OK) {
+ req->cqe->result.u64 =
+ nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector);
+ }
+
+ nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
+ nvmet_req_bio_put(req, bio);
+}
+
+void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
+{
+ sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
+ u16 status = NVME_SC_SUCCESS;
+ unsigned int total_len = 0;
+ struct scatterlist *sg;
+ struct bio *bio;
+ int sg_cnt;
+
+ /* Request is completed on len mismatch in nvmet_check_transfer_len() */
+ if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
+ return;
+
+ if (!req->sg_cnt) {
+ nvmet_req_complete(req, 0);
+ return;
+ }
+
+ if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
+ req->error_loc = offsetof(struct nvme_rw_command, slba);
+ status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ goto out;
+ }
+
+ if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
+ req->error_loc = offsetof(struct nvme_rw_command, slba);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out;
+ }
+
+ if (nvmet_use_inline_bvec(req)) {
+ bio = &req->z.inline_bio;
+ bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ } else {
+ bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
+ }
+
+ bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
+ bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
+ bio_set_dev(bio, req->ns->bdev);
+ bio->bi_iter.bi_sector = sect;
+ bio->bi_private = req;
+ if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
+ bio->bi_opf |= REQ_FUA;
+
+ for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
+ struct page *p = sg_page(sg);
+ unsigned int l = sg->length;
+ unsigned int o = sg->offset;
+ unsigned int ret;
+
+ ret = bio_add_zone_append_page(bio, p, l, o);
+ if (ret != sg->length) {
+ status = NVME_SC_INTERNAL;
+ goto out_put_bio;
+ }
+ total_len += sg->length;
+ }
+
+ if (total_len != nvmet_rw_data_len(req)) {
+ status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ goto out_put_bio;
+ }
+
+ submit_bio(bio);
+ return;
+
+out_put_bio:
+ nvmet_req_bio_put(req, bio);
+out:
+ nvmet_req_complete(req, status);
+}
+
+u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->common.opcode) {
+ case nvme_cmd_zone_append:
+ req->execute = nvmet_bdev_execute_zone_append;
+ return 0;
+ case nvme_cmd_zone_mgmt_recv:
+ req->execute = nvmet_bdev_execute_zone_mgmt_recv;
+ return 0;
+ case nvme_cmd_zone_mgmt_send:
+ req->execute = nvmet_bdev_execute_zone_mgmt_send;
+ return 0;
+ default:
+ return nvmet_bdev_parse_io_cmd(req);
+ }
+}