Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/Makefile                   |    2
-rw-r--r--  drivers/nvme/common/Kconfig             |   12
-rw-r--r--  drivers/nvme/common/Makefile            |    6
-rw-r--r--  drivers/nvme/common/auth.c              |   92
-rw-r--r--  drivers/nvme/common/keyring.c           |  224
-rw-r--r--  drivers/nvme/host/Kconfig               |   31
-rw-r--r--  drivers/nvme/host/Makefile              |    2
-rw-r--r--  drivers/nvme/host/apple.c               |   54
-rw-r--r--  drivers/nvme/host/auth.c                |   73
-rw-r--r--  drivers/nvme/host/constants.c           |   12
-rw-r--r--  drivers/nvme/host/core.c                | 1308
-rw-r--r--  drivers/nvme/host/fabrics.c             |  191
-rw-r--r--  drivers/nvme/host/fabrics.h             |   27
-rw-r--r--  drivers/nvme/host/fault_inject.c        |    3
-rw-r--r--  drivers/nvme/host/fc.c                  |  179
-rw-r--r--  drivers/nvme/host/hwmon.c               |    2
-rw-r--r--  drivers/nvme/host/ioctl.c               |  243
-rw-r--r--  drivers/nvme/host/multipath.c           |  223
-rw-r--r--  drivers/nvme/host/nvme.h                |  224
-rw-r--r--  drivers/nvme/host/pci.c                 |  187
-rw-r--r--  drivers/nvme/host/pr.c                  |   13
-rw-r--r--  drivers/nvme/host/rdma.c                |  136
-rw-r--r--  drivers/nvme/host/sysfs.c               |  242
-rw-r--r--  drivers/nvme/host/tcp.c                 |  354
-rw-r--r--  drivers/nvme/host/trace.c               |  105
-rw-r--r--  drivers/nvme/host/zns.c                 |   65
-rw-r--r--  drivers/nvme/target/Kconfig             |   37
-rw-r--r--  drivers/nvme/target/Makefile            |    1
-rw-r--r--  drivers/nvme/target/admin-cmd.c         |   36
-rw-r--r--  drivers/nvme/target/auth.c              |   89
-rw-r--r--  drivers/nvme/target/configfs.c          |  273
-rw-r--r--  drivers/nvme/target/core.c              |  127
-rw-r--r--  drivers/nvme/target/debugfs.c           |  202
-rw-r--r--  drivers/nvme/target/debugfs.h           |   42
-rw-r--r--  drivers/nvme/target/discovery.c         |   18
-rw-r--r--  drivers/nvme/target/fabrics-cmd-auth.c  |   77
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c       |   66
-rw-r--r--  drivers/nvme/target/fc.c                |  227
-rw-r--r--  drivers/nvme/target/fcloop.c            |   42
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c       |   51
-rw-r--r--  drivers/nvme/target/loop.c              |   18
-rw-r--r--  drivers/nvme/target/nvmet.h             |   39
-rw-r--r--  drivers/nvme/target/passthru.c          |   22
-rw-r--r--  drivers/nvme/target/rdma.c              |   96
-rw-r--r--  drivers/nvme/target/tcp.c               |  438
-rw-r--r--  drivers/nvme/target/trace.c             |  104
-rw-r--r--  drivers/nvme/target/trace.h             |   33
-rw-r--r--  drivers/nvme/target/zns.c               |   45
48 files changed, 4338 insertions, 1755 deletions
diff --git a/drivers/nvme/Makefile b/drivers/nvme/Makefile
index eedca8c72098..74f59ceed3d5 100644
--- a/drivers/nvme/Makefile
+++ b/drivers/nvme/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_NVME_COMMON) += common/
+obj-y += common/
obj-y += host/
obj-y += target/
diff --git a/drivers/nvme/common/Kconfig b/drivers/nvme/common/Kconfig
index 4514f44362dd..244432e0b73d 100644
--- a/drivers/nvme/common/Kconfig
+++ b/drivers/nvme/common/Kconfig
@@ -1,4 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
-config NVME_COMMON
+config NVME_KEYRING
tristate
+ select KEYS
+
+config NVME_AUTH
+ tristate
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_DH
+ select CRYPTO_DH_RFC7919_GROUPS
diff --git a/drivers/nvme/common/Makefile b/drivers/nvme/common/Makefile
index 720c625b8a52..681514cf2e2f 100644
--- a/drivers/nvme/common/Makefile
+++ b/drivers/nvme/common/Makefile
@@ -2,6 +2,8 @@
ccflags-y += -I$(src)
-obj-$(CONFIG_NVME_COMMON) += nvme-common.o
+obj-$(CONFIG_NVME_AUTH) += nvme-auth.o
+obj-$(CONFIG_NVME_KEYRING) += nvme-keyring.o
-nvme-common-y += auth.o
+nvme-auth-y += auth.o
+nvme-keyring-y += keyring.o
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
index d90e4f0c08b7..a3455f1d67fa 100644
--- a/drivers/nvme/common/auth.c
+++ b/drivers/nvme/common/auth.c
@@ -150,6 +150,14 @@ size_t nvme_auth_hmac_hash_len(u8 hmac_id)
}
EXPORT_SYMBOL_GPL(nvme_auth_hmac_hash_len);
+u32 nvme_auth_key_struct_size(u32 key_len)
+{
+ struct nvme_dhchap_key key;
+
+ return struct_size(&key, key, key_len);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_key_struct_size);
+
struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
u8 key_hash)
{
@@ -163,14 +171,9 @@ struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
p = strrchr(secret, ':');
if (p)
allocated_len = p - secret;
- key = kzalloc(sizeof(*key), GFP_KERNEL);
+ key = nvme_auth_alloc_key(allocated_len, 0);
if (!key)
return ERR_PTR(-ENOMEM);
- key->key = kzalloc(allocated_len, GFP_KERNEL);
- if (!key->key) {
- ret = -ENOMEM;
- goto out_free_key;
- }
key_len = base64_decode(secret, allocated_len, key->key);
if (key_len < 0) {
@@ -187,14 +190,6 @@ struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
goto out_free_secret;
}
- if (key_hash > 0 &&
- (key_len - 4) != nvme_auth_hmac_hash_len(key_hash)) {
- pr_err("Mismatched key len %d for %s\n", key_len,
- nvme_auth_hmac_name(key_hash));
- ret = -EINVAL;
- goto out_free_secret;
- }
-
/* The last four bytes are the CRC in little-endian format */
key_len -= 4;
/*
@@ -213,37 +208,51 @@ struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
key->hash = key_hash;
return key;
out_free_secret:
- kfree_sensitive(key->key);
-out_free_key:
- kfree(key);
+ nvme_auth_free_key(key);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(nvme_auth_extract_key);
+struct nvme_dhchap_key *nvme_auth_alloc_key(u32 len, u8 hash)
+{
+ u32 num_bytes = nvme_auth_key_struct_size(len);
+ struct nvme_dhchap_key *key = kzalloc(num_bytes, GFP_KERNEL);
+
+ if (key) {
+ key->len = len;
+ key->hash = hash;
+ }
+ return key;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_alloc_key);
+
void nvme_auth_free_key(struct nvme_dhchap_key *key)
{
if (!key)
return;
- kfree_sensitive(key->key);
- kfree(key);
+ kfree_sensitive(key);
}
EXPORT_SYMBOL_GPL(nvme_auth_free_key);
-u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
+struct nvme_dhchap_key *nvme_auth_transform_key(
+ struct nvme_dhchap_key *key, char *nqn)
{
const char *hmac_name;
struct crypto_shash *key_tfm;
struct shash_desc *shash;
- u8 *transformed_key;
- int ret;
+ struct nvme_dhchap_key *transformed_key;
+ int ret, key_len;
- if (!key || !key->key) {
+ if (!key) {
pr_warn("No key specified\n");
return ERR_PTR(-ENOKEY);
}
if (key->hash == 0) {
- transformed_key = kmemdup(key->key, key->len, GFP_KERNEL);
- return transformed_key ? transformed_key : ERR_PTR(-ENOMEM);
+ key_len = nvme_auth_key_struct_size(key->len);
+ transformed_key = kmemdup(key, key_len, GFP_KERNEL);
+ if (!transformed_key)
+ return ERR_PTR(-ENOMEM);
+ return transformed_key;
}
hmac_name = nvme_auth_hmac_name(key->hash);
if (!hmac_name) {
@@ -253,7 +262,7 @@ u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
key_tfm = crypto_alloc_shash(hmac_name, 0, 0);
if (IS_ERR(key_tfm))
- return (u8 *)key_tfm;
+ return ERR_CAST(key_tfm);
shash = kmalloc(sizeof(struct shash_desc) +
crypto_shash_descsize(key_tfm),
@@ -263,7 +272,8 @@ u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
goto out_free_key;
}
- transformed_key = kzalloc(crypto_shash_digestsize(key_tfm), GFP_KERNEL);
+ key_len = crypto_shash_digestsize(key_tfm);
+ transformed_key = nvme_auth_alloc_key(key_len, key->hash);
if (!transformed_key) {
ret = -ENOMEM;
goto out_free_shash;
@@ -282,7 +292,7 @@ u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17);
if (ret < 0)
goto out_free_transformed_key;
- ret = crypto_shash_final(shash, transformed_key);
+ ret = crypto_shash_final(shash, transformed_key->key);
if (ret < 0)
goto out_free_transformed_key;
@@ -292,7 +302,7 @@ u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
return transformed_key;
out_free_transformed_key:
- kfree_sensitive(transformed_key);
+ nvme_auth_free_key(transformed_key);
out_free_shash:
kfree(shash);
out_free_key:
@@ -331,7 +341,6 @@ int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
u8 *challenge, u8 *aug, size_t hlen)
{
struct crypto_shash *tfm;
- struct shash_desc *desc;
u8 *hashed_key;
const char *hmac_name;
int ret;
@@ -359,29 +368,11 @@ int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
goto out_free_key;
}
- desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
- GFP_KERNEL);
- if (!desc) {
- ret = -ENOMEM;
- goto out_free_hash;
- }
- desc->tfm = tfm;
-
ret = crypto_shash_setkey(tfm, hashed_key, hlen);
if (ret)
- goto out_free_desc;
-
- ret = crypto_shash_init(desc);
- if (ret)
- goto out_free_desc;
-
- ret = crypto_shash_update(desc, challenge, hlen);
- if (ret)
- goto out_free_desc;
+ goto out_free_hash;
- ret = crypto_shash_final(desc, aug);
-out_free_desc:
- kfree_sensitive(desc);
+ ret = crypto_shash_tfm_digest(tfm, challenge, hlen, aug);
out_free_hash:
crypto_free_shash(tfm);
out_free_key:
@@ -480,4 +471,5 @@ int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
}
EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
+MODULE_DESCRIPTION("NVMe Authentication framework");
MODULE_LICENSE("GPL v2");
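The auth.c refactor above replaces the old two-allocation key layout with a single allocation sized by struct_size(), so the key bytes live in a flexible array member behind the header and one kfree_sensitive() wipes metadata and secret together. A minimal userspace sketch of that layout (the struct and function names here are illustrative, not the kernel's):

#include <stdlib.h>

struct dhchap_key {
	unsigned int len;
	unsigned char hash;
	unsigned char key[];	/* flexible array member */
};

/* One calloc() covers header plus key bytes, mirroring
 * nvme_auth_alloc_key() in the diff above, so freeing (and
 * wiping) the secret is a single operation. */
static struct dhchap_key *alloc_key(unsigned int len, unsigned char hash)
{
	struct dhchap_key *k = calloc(1, sizeof(*k) + len);

	if (k) {
		k->len = len;
		k->hash = hash;
	}
	return k;
}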
diff --git a/drivers/nvme/common/keyring.c b/drivers/nvme/common/keyring.c
new file mode 100644
index 000000000000..ed5167f942d8
--- /dev/null
+++ b/drivers/nvme/common/keyring.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 Hannes Reinecke, SUSE Labs
+ */
+
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/key.h>
+#include <linux/key-type.h>
+#include <keys/user-type.h>
+#include <linux/nvme.h>
+#include <linux/nvme-tcp.h>
+#include <linux/nvme-keyring.h>
+
+static struct key *nvme_keyring;
+
+key_serial_t nvme_keyring_id(void)
+{
+ return nvme_keyring->serial;
+}
+EXPORT_SYMBOL_GPL(nvme_keyring_id);
+
+static bool nvme_tls_psk_revoked(struct key *psk)
+{
+ return test_bit(KEY_FLAG_REVOKED, &psk->flags) ||
+ test_bit(KEY_FLAG_INVALIDATED, &psk->flags);
+}
+
+struct key *nvme_tls_key_lookup(key_serial_t key_id)
+{
+ struct key *key = key_lookup(key_id);
+
+ if (IS_ERR(key)) {
+ pr_err("key id %08x not found\n", key_id);
+ return key;
+ }
+ if (nvme_tls_psk_revoked(key)) {
+ pr_err("key id %08x revoked\n", key_id);
+ return ERR_PTR(-EKEYREVOKED);
+ }
+ return key;
+}
+EXPORT_SYMBOL_GPL(nvme_tls_key_lookup);
+
+static void nvme_tls_psk_describe(const struct key *key, struct seq_file *m)
+{
+ seq_puts(m, key->description);
+ seq_printf(m, ": %u", key->datalen);
+}
+
+static bool nvme_tls_psk_match(const struct key *key,
+ const struct key_match_data *match_data)
+{
+ const char *match_id;
+ size_t match_len;
+
+ if (!key->description) {
+ pr_debug("%s: no key description\n", __func__);
+ return false;
+ }
+ if (!match_data->raw_data) {
+ pr_debug("%s: no match data\n", __func__);
+ return false;
+ }
+ match_id = match_data->raw_data;
+ match_len = strlen(match_id);
+ pr_debug("%s: match '%s' '%s' len %zd\n",
+ __func__, match_id, key->description, match_len);
+ return !memcmp(key->description, match_id, match_len);
+}
+
+static int nvme_tls_psk_match_preparse(struct key_match_data *match_data)
+{
+ match_data->lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE;
+ match_data->cmp = nvme_tls_psk_match;
+ return 0;
+}
+
+static struct key_type nvme_tls_psk_key_type = {
+ .name = "psk",
+ .flags = KEY_TYPE_NET_DOMAIN,
+ .preparse = user_preparse,
+ .free_preparse = user_free_preparse,
+ .match_preparse = nvme_tls_psk_match_preparse,
+ .instantiate = generic_key_instantiate,
+ .revoke = user_revoke,
+ .destroy = user_destroy,
+ .describe = nvme_tls_psk_describe,
+ .read = user_read,
+};
+
+static struct key *nvme_tls_psk_lookup(struct key *keyring,
+ const char *hostnqn, const char *subnqn,
+ u8 hmac, u8 psk_ver, bool generated)
+{
+ char *identity;
+ size_t identity_len = (NVMF_NQN_SIZE) * 2 + 11;
+ key_ref_t keyref;
+ key_serial_t keyring_id;
+
+ identity = kzalloc(identity_len, GFP_KERNEL);
+ if (!identity)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(identity, identity_len, "NVMe%u%c%02u %s %s",
+ psk_ver, generated ? 'G' : 'R', hmac, hostnqn, subnqn);
+
+ if (!keyring)
+ keyring = nvme_keyring;
+ keyring_id = key_serial(keyring);
+ pr_debug("keyring %x lookup tls psk '%s'\n",
+ keyring_id, identity);
+ keyref = keyring_search(make_key_ref(keyring, true),
+ &nvme_tls_psk_key_type,
+ identity, false);
+ if (IS_ERR(keyref)) {
+ pr_debug("lookup tls psk '%s' failed, error %ld\n",
+ identity, PTR_ERR(keyref));
+ kfree(identity);
+ return ERR_PTR(-ENOKEY);
+ }
+ kfree(identity);
+
+ return key_ref_to_ptr(keyref);
+}
+
+/*
+ * NVMe PSK priority list
+ *
+ * 'Retained' PSKs (i.e. 'generated == false') should be preferred to 'generated'
+ * PSKs, PSKs with hash (psk_ver 1) should be preferred to PSKs without hash
+ * (psk_ver 0), and SHA-384 should be preferred to SHA-256.
+ */
+static struct nvme_tls_psk_priority_list {
+ bool generated;
+ u8 psk_ver;
+ enum nvme_tcp_tls_cipher cipher;
+} nvme_tls_psk_prio[] = {
+ { .generated = false,
+ .psk_ver = 1,
+ .cipher = NVME_TCP_TLS_CIPHER_SHA384, },
+ { .generated = false,
+ .psk_ver = 1,
+ .cipher = NVME_TCP_TLS_CIPHER_SHA256, },
+ { .generated = false,
+ .psk_ver = 0,
+ .cipher = NVME_TCP_TLS_CIPHER_SHA384, },
+ { .generated = false,
+ .psk_ver = 0,
+ .cipher = NVME_TCP_TLS_CIPHER_SHA256, },
+ { .generated = true,
+ .psk_ver = 1,
+ .cipher = NVME_TCP_TLS_CIPHER_SHA384, },
+ { .generated = true,
+ .psk_ver = 1,
+ .cipher = NVME_TCP_TLS_CIPHER_SHA256, },
+ { .generated = true,
+ .psk_ver = 0,
+ .cipher = NVME_TCP_TLS_CIPHER_SHA384, },
+ { .generated = true,
+ .psk_ver = 0,
+ .cipher = NVME_TCP_TLS_CIPHER_SHA256, },
+};
+
+/*
+ * nvme_tls_psk_default - Return the preferred PSK to use for TLS ClientHello
+ */
+key_serial_t nvme_tls_psk_default(struct key *keyring,
+ const char *hostnqn, const char *subnqn)
+{
+ struct key *tls_key;
+ key_serial_t tls_key_id;
+ int prio;
+
+ for (prio = 0; prio < ARRAY_SIZE(nvme_tls_psk_prio); prio++) {
+ bool generated = nvme_tls_psk_prio[prio].generated;
+ u8 ver = nvme_tls_psk_prio[prio].psk_ver;
+ enum nvme_tcp_tls_cipher cipher = nvme_tls_psk_prio[prio].cipher;
+
+ tls_key = nvme_tls_psk_lookup(keyring, hostnqn, subnqn,
+ cipher, ver, generated);
+ if (!IS_ERR(tls_key)) {
+ tls_key_id = tls_key->serial;
+ key_put(tls_key);
+ return tls_key_id;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_tls_psk_default);
+
+static int __init nvme_keyring_init(void)
+{
+ int err;
+
+ nvme_keyring = keyring_alloc(".nvme",
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+ current_cred(),
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ (KEY_USR_ALL & ~KEY_USR_SETATTR),
+ KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
+ if (IS_ERR(nvme_keyring))
+ return PTR_ERR(nvme_keyring);
+
+ err = register_key_type(&nvme_tls_psk_key_type);
+ if (err) {
+ key_put(nvme_keyring);
+ return err;
+ }
+ return 0;
+}
+
+static void __exit nvme_keyring_exit(void)
+{
+ unregister_key_type(&nvme_tls_psk_key_type);
+ key_revoke(nvme_keyring);
+ key_put(nvme_keyring);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
+MODULE_DESCRIPTION("NVMe Keyring implementation");
+module_init(nvme_keyring_init);
+module_exit(nvme_keyring_exit);
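nvme_tls_psk_lookup() above identifies PSKs purely by a formatted description string. A standalone sketch of how such an identity is composed (the format string matches the code above; the NQNs and hmac value are placeholders):

#include <stdio.h>

int main(void)
{
	char identity[512];
	unsigned int psk_ver = 1;	/* PSK with hash */
	int generated = 0;		/* 'R'etained vs 'G'enerated */
	unsigned int hmac = 2;		/* cipher identifier, e.g. SHA-384 */
	const char *hostnqn = "nqn.2014-08.org.nvmexpress:uuid:host-example";
	const char *subnqn = "nqn.2014-08.org.nvmexpress:uuid:subsys-example";

	/* Same format string nvme_tls_psk_lookup() searches for. */
	snprintf(identity, sizeof(identity), "NVMe%u%c%02u %s %s",
		 psk_ver, generated ? 'G' : 'R', hmac, hostnqn, subnqn);
	printf("%s\n", identity);	/* "NVMe1R02 nqn.... nqn...." */
	return 0;
}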
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 2f6a7f8c94e8..486afe598184 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config NVME_CORE
tristate
- select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
config BLK_DEV_NVME
tristate "NVM Express block device"
@@ -42,6 +41,7 @@ config NVME_HWMON
config NVME_FABRICS
select NVME_CORE
+ select NVME_KEYRING if NVME_TCP_TLS
tristate
config NVME_RDMA
@@ -92,18 +92,27 @@ config NVME_TCP
If unsure, say N.
-config NVME_AUTH
- bool "NVM Express over Fabrics In-Band Authentication"
+config NVME_TCP_TLS
+ bool "NVMe over Fabrics TCP TLS encryption support"
+ depends on NVME_TCP
+ select NET_HANDSHAKE
+ select KEYS
+ help
+ Enables TLS encryption for NVMe TCP using the netlink handshake API.
+
+ The TLS handshake daemon is available at
+ https://github.com/oracle/ktls-utils.
+
+ If unsure, say N.
+
+config NVME_HOST_AUTH
+ bool "NVMe over Fabrics In-Band Authentication in host side"
depends on NVME_CORE
- select NVME_COMMON
- select CRYPTO
- select CRYPTO_HMAC
- select CRYPTO_SHA256
- select CRYPTO_SHA512
- select CRYPTO_DH
- select CRYPTO_DH_RFC7919_GROUPS
+ select NVME_AUTH
+ select NVME_KEYRING if NVME_TCP_TLS
help
- This provides support for NVMe over Fabrics In-Band Authentication.
+ This provides support for NVMe over Fabrics In-Band Authentication on
+ the host side.
If unsure, say N.
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index c7c3cf202d12..6414ec968f99 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -17,7 +17,7 @@ nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o
nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o
nvme-core-$(CONFIG_NVME_HWMON) += hwmon.o
-nvme-core-$(CONFIG_NVME_AUTH) += auth.o
+nvme-core-$(CONFIG_NVME_HOST_AUTH) += auth.o
nvme-y += pci.o
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 596bb11eeba5..b1387dc459a3 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -797,6 +797,7 @@ static int apple_nvme_init_request(struct blk_mq_tag_set *set,
static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
{
+ enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);
u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
bool dead = false, freeze = false;
unsigned long flags;
@@ -808,8 +809,8 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
if (csts & NVME_CSTS_CFS)
dead = true;
- if (anv->ctrl.state == NVME_CTRL_LIVE ||
- anv->ctrl.state == NVME_CTRL_RESETTING) {
+ if (state == NVME_CTRL_LIVE ||
+ state == NVME_CTRL_RESETTING) {
freeze = true;
nvme_start_freeze(&anv->ctrl);
}
@@ -881,7 +882,7 @@ static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
unsigned long flags;
u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
- if (anv->ctrl.state != NVME_CTRL_LIVE) {
+ if (nvme_ctrl_state(&anv->ctrl) != NVME_CTRL_LIVE) {
/*
* From rdma.c:
* If we are resetting, connecting or deleting we should
@@ -985,10 +986,10 @@ static void apple_nvme_reset_work(struct work_struct *work)
u32 boot_status, aqa;
struct apple_nvme *anv =
container_of(work, struct apple_nvme, ctrl.reset_work);
+ enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);
- if (anv->ctrl.state != NVME_CTRL_RESETTING) {
- dev_warn(anv->dev, "ctrl state %d is not RESETTING\n",
- anv->ctrl.state);
+ if (state != NVME_CTRL_RESETTING) {
+ dev_warn(anv->dev, "ctrl state %d is not RESETTING\n", state);
ret = -ENODEV;
goto out;
}
@@ -1387,7 +1388,7 @@ static void devm_apple_nvme_mempool_destroy(void *data)
mempool_destroy(data);
}
-static int apple_nvme_probe(struct platform_device *pdev)
+static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct apple_nvme *anv;
@@ -1395,7 +1396,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL);
if (!anv)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
anv->dev = get_device(dev);
anv->adminq.is_adminq = true;
@@ -1515,10 +1516,30 @@ static int apple_nvme_probe(struct platform_device *pdev)
goto put_dev;
}
- anv->ctrl.admin_q = blk_mq_init_queue(&anv->admin_tagset);
+ return anv;
+put_dev:
+ put_device(anv->dev);
+ return ERR_PTR(ret);
+}
+
+static int apple_nvme_probe(struct platform_device *pdev)
+{
+ struct apple_nvme *anv;
+ int ret;
+
+ anv = apple_nvme_alloc(pdev);
+ if (IS_ERR(anv))
+ return PTR_ERR(anv);
+
+ ret = nvme_add_ctrl(&anv->ctrl);
+ if (ret)
+ goto out_put_ctrl;
+
+ anv->ctrl.admin_q = blk_mq_alloc_queue(&anv->admin_tagset, NULL, NULL);
if (IS_ERR(anv->ctrl.admin_q)) {
ret = -ENOMEM;
- goto put_dev;
+ anv->ctrl.admin_q = NULL;
+ goto out_uninit_ctrl;
}
nvme_reset_ctrl(&anv->ctrl);
@@ -1526,12 +1547,14 @@ static int apple_nvme_probe(struct platform_device *pdev)
return 0;
-put_dev:
- put_device(anv->dev);
+out_uninit_ctrl:
+ nvme_uninit_ctrl(&anv->ctrl);
+out_put_ctrl:
+ nvme_put_ctrl(&anv->ctrl);
return ret;
}
-static int apple_nvme_remove(struct platform_device *pdev)
+static void apple_nvme_remove(struct platform_device *pdev)
{
struct apple_nvme *anv = platform_get_drvdata(pdev);
@@ -1546,8 +1569,6 @@ static int apple_nvme_remove(struct platform_device *pdev)
apple_rtkit_shutdown(anv->rtk);
apple_nvme_detach_genpd(anv);
-
- return 0;
}
static void apple_nvme_shutdown(struct platform_device *pdev)
@@ -1597,10 +1618,11 @@ static struct platform_driver apple_nvme_driver = {
.pm = pm_sleep_ptr(&apple_nvme_pm_ops),
},
.probe = apple_nvme_probe,
- .remove = apple_nvme_remove,
+ .remove_new = apple_nvme_remove,
.shutdown = apple_nvme_shutdown,
};
module_platform_driver(apple_nvme_driver);
MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple ANS NVM Express device driver");
MODULE_LICENSE("GPL");
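The probe path above is split so that apple_nvme_alloc() hands back either the controller or an encoded errno through a single pointer. A short userspace reminder of the idiom (the kernel's real helpers live in <linux/err.h>; these are reimplemented here for illustration only):

#define MAX_ERRNO	4095

/* Encode a negative errno in the pointer value itself. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

/* Recover the errno from an encoded pointer. */
static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

/* Pointers in the top MAX_ERRNO bytes of the address space
 * are treated as encoded errors, never as valid objects. */
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}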
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index daf5d144a8ea..371e14f0a203 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -23,11 +23,13 @@ struct nvme_dhchap_queue_context {
struct nvme_ctrl *ctrl;
struct crypto_shash *shash_tfm;
struct crypto_kpp *dh_tfm;
+ struct nvme_dhchap_key *transformed_key;
void *buf;
int qid;
int error;
u32 s1;
u32 s2;
+ bool bi_directional;
u16 transaction;
u8 status;
u8 dhgroup_id;
@@ -36,7 +38,6 @@ struct nvme_dhchap_queue_context {
u8 c1[64];
u8 c2[64];
u8 response[64];
- u8 *host_response;
u8 *ctrl_key;
u8 *host_key;
u8 *sess_key;
@@ -47,11 +48,6 @@ struct nvme_dhchap_queue_context {
static struct workqueue_struct *nvme_auth_wq;
-#define nvme_auth_flags_from_qid(qid) \
- (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
-#define nvme_auth_queue_from_qid(ctrl, qid) \
- (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q
-
static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
{
return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
@@ -62,10 +58,15 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
void *data, size_t data_len, bool auth_send)
{
struct nvme_command cmd = {};
- blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);
- struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
+ nvme_submit_flags_t flags = NVME_SUBMIT_RETRY;
+ struct request_queue *q = ctrl->fabrics_q;
int ret;
+ if (qid != 0) {
+ flags |= NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED;
+ q = ctrl->connect_q;
+ }
+
cmd.auth_common.opcode = nvme_fabrics_command;
cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
cmd.auth_common.spsp0 = 0x01;
@@ -79,8 +80,7 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
}
ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
- qid == 0 ? NVME_QID_ANY : qid,
- 0, flags);
+ qid == 0 ? NVME_QID_ANY : qid, flags);
if (ret > 0)
dev_warn(ctrl->device,
"qid %d auth_send failed with status %d\n", qid, ret);
@@ -312,17 +312,17 @@ static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
data->dhvlen = cpu_to_le16(chap->host_key_len);
memcpy(data->rval, chap->response, chap->hash_len);
if (ctrl->ctrl_key) {
+ chap->bi_directional = true;
get_random_bytes(chap->c2, chap->hash_len);
data->cvalid = 1;
- chap->s2 = nvme_auth_get_seqnum();
memcpy(data->rval + chap->hash_len, chap->c2,
chap->hash_len);
dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
__func__, chap->qid, (int)chap->hash_len, chap->c2);
} else {
memset(chap->c2, 0, chap->hash_len);
- chap->s2 = 0;
}
+ chap->s2 = nvme_auth_get_seqnum();
data->seqnum = cpu_to_le32(chap->s2);
if (chap->host_key_len) {
dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
@@ -339,10 +339,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
struct nvme_dhchap_queue_context *chap)
{
struct nvmf_auth_dhchap_success1_data *data = chap->buf;
- size_t size = sizeof(*data);
-
- if (chap->ctrl_key)
- size += chap->hash_len;
+ size_t size = sizeof(*data) + chap->hash_len;
if (size > CHAP_BUF_SIZE) {
chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
@@ -428,12 +425,12 @@ static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
__func__, chap->qid, chap->s1, chap->transaction);
- if (!chap->host_response) {
- chap->host_response = nvme_auth_transform_key(ctrl->host_key,
+ if (!chap->transformed_key) {
+ chap->transformed_key = nvme_auth_transform_key(ctrl->host_key,
ctrl->opts->host->nqn);
- if (IS_ERR(chap->host_response)) {
- ret = PTR_ERR(chap->host_response);
- chap->host_response = NULL;
+ if (IS_ERR(chap->transformed_key)) {
+ ret = PTR_ERR(chap->transformed_key);
+ chap->transformed_key = NULL;
return ret;
}
} else {
@@ -442,7 +439,7 @@ static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
}
ret = crypto_shash_setkey(chap->shash_tfm,
- chap->host_response, ctrl->host_key->len);
+ chap->transformed_key->key, chap->transformed_key->len);
if (ret) {
dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
chap->qid, ret);
@@ -508,19 +505,19 @@ static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
struct nvme_dhchap_queue_context *chap)
{
SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
- u8 *ctrl_response;
+ struct nvme_dhchap_key *transformed_key;
u8 buf[4], *challenge = chap->c2;
int ret;
- ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key,
+ transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
ctrl->opts->subsysnqn);
- if (IS_ERR(ctrl_response)) {
- ret = PTR_ERR(ctrl_response);
+ if (IS_ERR(transformed_key)) {
+ ret = PTR_ERR(transformed_key);
return ret;
}
ret = crypto_shash_setkey(chap->shash_tfm,
- ctrl_response, ctrl->ctrl_key->len);
+ transformed_key->key, transformed_key->len);
if (ret) {
dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
chap->qid, ret);
@@ -586,7 +583,7 @@ static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
out:
if (challenge != chap->c2)
kfree(challenge);
- kfree(ctrl_response);
+ nvme_auth_free_key(transformed_key);
return ret;
}
@@ -648,8 +645,8 @@ gen_sesskey:
static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
{
- kfree_sensitive(chap->host_response);
- chap->host_response = NULL;
+ nvme_auth_free_key(chap->transformed_key);
+ chap->transformed_key = NULL;
kfree_sensitive(chap->host_key);
chap->host_key = NULL;
chap->host_key_len = 0;
@@ -663,6 +660,7 @@ static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
chap->error = 0;
chap->s1 = 0;
chap->s2 = 0;
+ chap->bi_directional = false;
chap->transaction = 0;
memset(chap->c1, 0, sizeof(chap->c1));
memset(chap->c2, 0, sizeof(chap->c2));
@@ -732,7 +730,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
if (ret) {
chap->status = ret;
- chap->error = -ECONNREFUSED;
+ chap->error = -EKEYREJECTED;
return;
}
@@ -758,12 +756,11 @@ static void nvme_queue_auth_work(struct work_struct *work)
__func__, chap->qid);
mutex_lock(&ctrl->dhchap_auth_mutex);
ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
if (ret) {
- mutex_unlock(&ctrl->dhchap_auth_mutex);
chap->error = ret;
goto fail2;
}
- mutex_unlock(&ctrl->dhchap_auth_mutex);
/* DH-HMAC-CHAP Step 3: send reply */
dev_dbg(ctrl->device, "%s: qid %d send reply\n",
@@ -800,7 +797,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
if (ret) {
chap->status = ret;
- chap->error = -ECONNREFUSED;
+ chap->error = -EKEYREJECTED;
return;
}
@@ -821,11 +818,11 @@ static void nvme_queue_auth_work(struct work_struct *work)
ret = nvme_auth_process_dhchap_success1(ctrl, chap);
if (ret) {
/* Controller authentication failed */
- chap->error = -ECONNREFUSED;
+ chap->error = -EKEYREJECTED;
goto fail2;
}
- if (chap->ctrl_key) {
+ if (chap->bi_directional) {
/* DH-HMAC-CHAP Step 5: send success2 */
dev_dbg(ctrl->device, "%s: qid %d send success2\n",
__func__, chap->qid);
@@ -840,6 +837,8 @@ static void nvme_queue_auth_work(struct work_struct *work)
}
fail2:
+ if (chap->status == 0)
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
__func__, chap->qid, chap->status);
tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
@@ -897,7 +896,7 @@ static void nvme_ctrl_auth_work(struct work_struct *work)
* If the ctrl is not connected, bail as reconnect will handle
* authentication.
*/
- if (ctrl->state != NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
return;
/* Authenticate admin queue first */
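The nvme_auth_submit() change above drops the per-caller macros: callers now pass driver-level NVME_SUBMIT_* flags and a single helper in core.c maps them onto block-layer request flags. A sketch of that translation, with bit values assumed purely for illustration:

typedef unsigned int nvme_submit_flags_t;

enum {
	NVME_SUBMIT_AT_HEAD  = 1 << 0,	/* illustrative values */
	NVME_SUBMIT_NOWAIT   = 1 << 1,
	NVME_SUBMIT_RESERVED = 1 << 2,
	NVME_SUBMIT_RETRY    = 1 << 3,
};

enum {
	BLK_MQ_REQ_NOWAIT   = 1 << 0,	/* illustrative values */
	BLK_MQ_REQ_RESERVED = 1 << 1,
};

/* Map driver submit flags to block-layer allocation flags, as
 * __nvme_submit_sync_cmd() does in the core.c hunk further down. */
static unsigned int nvme_submit_to_blk_flags(nvme_submit_flags_t flags)
{
	unsigned int blk_flags = 0;

	if (flags & NVME_SUBMIT_NOWAIT)
		blk_flags |= BLK_MQ_REQ_NOWAIT;
	if (flags & NVME_SUBMIT_RESERVED)
		blk_flags |= BLK_MQ_REQ_RESERVED;
	return blk_flags;
}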
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index 20f46c230885..2b9e6cfaf2a8 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -171,15 +171,15 @@ static const char * const nvme_statuses[] = {
[NVME_SC_HOST_ABORTED_CMD] = "Host Aborted Command",
};
-const unsigned char *nvme_get_error_status_str(u16 status)
+const char *nvme_get_error_status_str(u16 status)
{
- status &= 0x7ff;
+ status &= NVME_SCT_SC_MASK;
if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status])
- return nvme_statuses[status & 0x7ff];
+ return nvme_statuses[status];
return "Unknown";
}
-const unsigned char *nvme_get_opcode_str(u8 opcode)
+const char *nvme_get_opcode_str(u8 opcode)
{
if (opcode < ARRAY_SIZE(nvme_ops) && nvme_ops[opcode])
return nvme_ops[opcode];
@@ -187,7 +187,7 @@ const unsigned char *nvme_get_opcode_str(u8 opcode)
}
EXPORT_SYMBOL_GPL(nvme_get_opcode_str);
-const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+const char *nvme_get_admin_opcode_str(u8 opcode)
{
if (opcode < ARRAY_SIZE(nvme_admin_ops) && nvme_admin_ops[opcode])
return nvme_admin_ops[opcode];
@@ -195,7 +195,7 @@ const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
}
EXPORT_SYMBOL_GPL(nvme_get_admin_opcode_str);
-const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode) {
+const char *nvme_get_fabrics_opcode_str(u8 opcode) {
if (opcode < ARRAY_SIZE(nvme_fabrics_ops) && nvme_fabrics_ops[opcode])
return nvme_fabrics_ops[opcode];
return "Unknown";
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f3a01b79148c..ba6508455e18 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4,6 +4,7 @@
* Copyright (c) 2011-2014, Intel Corporation.
*/
+#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
@@ -20,6 +21,7 @@
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/pm_qos.h>
+#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include "nvme.h"
@@ -35,6 +37,7 @@ struct nvme_ns_info {
struct nvme_ns_ids ids;
u32 nsid;
__le32 anagrpid;
+ u8 pi_offset;
bool is_shared;
bool is_readonly;
bool is_ready;
@@ -109,16 +112,25 @@ struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);
static LIST_HEAD(nvme_subsystems);
-static DEFINE_MUTEX(nvme_subsystems_lock);
+DEFINE_MUTEX(nvme_subsystems_lock);
static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_ctrl_base_chr_devt;
-static struct class *nvme_class;
-static struct class *nvme_subsys_class;
+static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env);
+static const struct class nvme_class = {
+ .name = "nvme",
+ .dev_uevent = nvme_class_uevent,
+};
+
+static const struct class nvme_subsys_class = {
+ .name = "nvme-subsystem",
+};
static DEFINE_IDA(nvme_ns_chr_minor_ida);
static dev_t nvme_ns_chr_devt;
-static struct class *nvme_ns_chr_class;
+static const struct class nvme_ns_chr_class = {
+ .name = "nvme-generic",
+};
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
@@ -131,7 +143,7 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
/*
* Only new queue scan work when admin and IO queues are both alive
*/
- if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
+ if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset)
queue_work(nvme_wq, &ctrl->scan_work);
}
@@ -143,7 +155,7 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
*/
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{
- if (ctrl->state != NVME_CTRL_RESETTING)
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_RESETTING)
return -EBUSY;
if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
return -EBUSY;
@@ -156,7 +168,7 @@ static void nvme_failfast_work(struct work_struct *work)
struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
struct nvme_ctrl, failfast_work);
- if (ctrl->state != NVME_CTRL_CONNECTING)
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_CONNECTING)
return;
set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
@@ -200,7 +212,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
ret = nvme_reset_ctrl(ctrl);
if (!ret) {
flush_work(&ctrl->reset_work);
- if (ctrl->state != NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
ret = -ENETRESET;
}
@@ -251,7 +263,7 @@ void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
static blk_status_t nvme_error_status(u16 status)
{
- switch (status & 0x7ff) {
+ switch (status & NVME_SCT_SC_MASK) {
case NVME_SC_SUCCESS:
return BLK_STS_OK;
case NVME_SC_CAP_EXCEEDED:
@@ -297,7 +309,7 @@ static void nvme_retry_req(struct request *req)
u16 crd;
/* The mask and shift result must be <= 3 */
- crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
+ crd = (nvme_req(req)->status & NVME_STATUS_CRD) >> 11;
if (crd)
delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
@@ -312,17 +324,17 @@ static void nvme_log_error(struct request *req)
struct nvme_request *nr = nvme_req(req);
if (ns) {
- pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
+ pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %u blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
ns->disk ? ns->disk->disk_name : "?",
nvme_get_opcode_str(nr->cmd->common.opcode),
nr->cmd->common.opcode,
- (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)),
- (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift,
+ nvme_sect_to_lba(ns->head, blk_rq_pos(req)),
+ blk_rq_bytes(req) >> ns->head->lba_shift,
nvme_get_error_status_str(nr->status),
- nr->status >> 8 & 7, /* Status Code Type */
- nr->status & 0xff, /* Status Code */
- nr->status & NVME_SC_MORE ? "MORE " : "",
- nr->status & NVME_SC_DNR ? "DNR " : "");
+ NVME_SCT(nr->status), /* Status Code Type */
+ nr->status & NVME_SC_MASK, /* Status Code */
+ nr->status & NVME_STATUS_MORE ? "MORE " : "",
+ nr->status & NVME_STATUS_DNR ? "DNR " : "");
return;
}
@@ -331,10 +343,34 @@ static void nvme_log_error(struct request *req)
nvme_get_admin_opcode_str(nr->cmd->common.opcode),
nr->cmd->common.opcode,
nvme_get_error_status_str(nr->status),
- nr->status >> 8 & 7, /* Status Code Type */
- nr->status & 0xff, /* Status Code */
- nr->status & NVME_SC_MORE ? "MORE " : "",
- nr->status & NVME_SC_DNR ? "DNR " : "");
+ NVME_SCT(nr->status), /* Status Code Type */
+ nr->status & NVME_SC_MASK, /* Status Code */
+ nr->status & NVME_STATUS_MORE ? "MORE " : "",
+ nr->status & NVME_STATUS_DNR ? "DNR " : "");
+}
+
+static void nvme_log_err_passthru(struct request *req)
+{
+ struct nvme_ns *ns = req->q->queuedata;
+ struct nvme_request *nr = nvme_req(req);
+
+ pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s"
+ "cdw10=0x%x cdw11=0x%x cdw12=0x%x cdw13=0x%x cdw14=0x%x cdw15=0x%x\n",
+ ns ? ns->disk->disk_name : dev_name(nr->ctrl->device),
+ ns ? nvme_get_opcode_str(nr->cmd->common.opcode) :
+ nvme_get_admin_opcode_str(nr->cmd->common.opcode),
+ nr->cmd->common.opcode,
+ nvme_get_error_status_str(nr->status),
+ NVME_SCT(nr->status), /* Status Code Type */
+ nr->status & NVME_SC_MASK, /* Status Code */
+ nr->status & NVME_STATUS_MORE ? "MORE " : "",
+ nr->status & NVME_STATUS_DNR ? "DNR " : "",
+ nr->cmd->common.cdw10,
+ nr->cmd->common.cdw11,
+ nr->cmd->common.cdw12,
+ nr->cmd->common.cdw13,
+ nr->cmd->common.cdw14,
+ nr->cmd->common.cdw15);
}
enum nvme_disposition {
@@ -349,14 +385,14 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
if (likely(nvme_req(req)->status == 0))
return COMPLETE;
- if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
- return AUTHENTICATE;
-
if (blk_noretry_request(req) ||
- (nvme_req(req)->status & NVME_SC_DNR) ||
+ (nvme_req(req)->status & NVME_STATUS_DNR) ||
nvme_req(req)->retries >= nvme_max_retries)
return COMPLETE;
+ if ((nvme_req(req)->status & NVME_SCT_SC_MASK) == NVME_SC_AUTH_REQUIRED)
+ return AUTHENTICATE;
+
if (req->cmd_flags & REQ_NVME_MPATH) {
if (nvme_is_path_error(nvme_req(req)->status) ||
blk_queue_dying(req->q))
@@ -372,21 +408,33 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
static inline void nvme_end_req_zoned(struct request *req)
{
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
- req_op(req) == REQ_OP_ZONE_APPEND)
- req->__sector = nvme_lba_to_sect(req->q->queuedata,
+ req_op(req) == REQ_OP_ZONE_APPEND) {
+ struct nvme_ns *ns = req->q->queuedata;
+
+ req->__sector = nvme_lba_to_sect(ns->head,
le64_to_cpu(nvme_req(req)->result.u64));
+ }
}
-static inline void nvme_end_req(struct request *req)
+static inline void __nvme_end_req(struct request *req)
{
- blk_status_t status = nvme_error_status(nvme_req(req)->status);
-
- if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
- nvme_log_error(req);
nvme_end_req_zoned(req);
nvme_trace_bio_complete(req);
if (req->cmd_flags & REQ_NVME_MPATH)
nvme_mpath_end_request(req);
+}
+
+void nvme_end_req(struct request *req)
+{
+ blk_status_t status = nvme_error_status(nvme_req(req)->status);
+
+ if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
+ if (blk_rq_is_passthrough(req))
+ nvme_log_err_passthru(req);
+ else
+ nvme_log_error(req);
+ }
+ __nvme_end_req(req);
blk_mq_end_request(req, status);
}
@@ -420,7 +468,7 @@ void nvme_complete_rq(struct request *req)
nvme_failover_req(req);
return;
case AUTHENTICATE:
-#ifdef CONFIG_NVME_AUTH
+#ifdef CONFIG_NVME_HOST_AUTH
queue_work(nvme_wq, &ctrl->dhchap_auth_work);
nvme_retry_req(req);
#else
@@ -435,7 +483,7 @@ void nvme_complete_batch_req(struct request *req)
{
trace_nvme_complete_rq(req);
nvme_cleanup_cmd(req);
- nvme_end_req_zoned(req);
+ __nvme_end_req(req);
}
EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
@@ -499,7 +547,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
spin_lock_irqsave(&ctrl->lock, flags);
- old_state = ctrl->state;
+ old_state = nvme_ctrl_state(ctrl);
switch (new_state) {
case NVME_CTRL_LIVE:
switch (old_state) {
@@ -567,7 +615,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
}
if (changed) {
- ctrl->state = new_state;
+ WRITE_ONCE(ctrl->state, new_state);
wake_up_all(&ctrl->state_wq);
}
@@ -575,11 +623,11 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
if (!changed)
return false;
- if (ctrl->state == NVME_CTRL_LIVE) {
+ if (new_state == NVME_CTRL_LIVE) {
if (old_state == NVME_CTRL_CONNECTING)
nvme_stop_failfast_work(ctrl);
nvme_kick_requeue_lists(ctrl);
- } else if (ctrl->state == NVME_CTRL_CONNECTING &&
+ } else if (new_state == NVME_CTRL_CONNECTING &&
old_state == NVME_CTRL_RESETTING) {
nvme_start_failfast_work(ctrl);
}
@@ -588,27 +636,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
/*
- * Returns true for sink states that can't ever transition back to live.
- */
-static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
-{
- switch (ctrl->state) {
- case NVME_CTRL_NEW:
- case NVME_CTRL_LIVE:
- case NVME_CTRL_RESETTING:
- case NVME_CTRL_CONNECTING:
- return false;
- case NVME_CTRL_DELETING:
- case NVME_CTRL_DELETING_NOIO:
- case NVME_CTRL_DEAD:
- return true;
- default:
- WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
- return true;
- }
-}
-
-/*
* Waits for the controller state to be resetting, or returns false if it is
* not possible to ever transition to that state.
*/
@@ -617,7 +644,7 @@ bool nvme_wait_reset(struct nvme_ctrl *ctrl)
wait_event(ctrl->state_wq,
nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
nvme_state_terminal(ctrl));
- return ctrl->state == NVME_CTRL_RESETTING;
+ return nvme_ctrl_state(ctrl) == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);
@@ -653,7 +680,7 @@ static void nvme_free_ns(struct kref *kref)
kfree(ns);
}
-static inline bool nvme_get_ns(struct nvme_ns *ns)
+bool nvme_get_ns(struct nvme_ns *ns)
{
return kref_get_unless_zero(&ns->kref);
}
@@ -675,10 +702,21 @@ static inline void nvme_clear_nvme_request(struct request *req)
/* initialize a passthrough request */
void nvme_init_request(struct request *req, struct nvme_command *cmd)
{
- if (req->q->queuedata)
+ struct nvme_request *nr = nvme_req(req);
+ bool logging_enabled;
+
+ if (req->q->queuedata) {
+ struct nvme_ns *ns = req->q->disk->private_data;
+
+ logging_enabled = ns->head->passthru_err_log_enabled;
req->timeout = NVME_IO_TIMEOUT;
- else /* no queuedata implies admin queue */
+ } else { /* no queuedata implies admin queue */
+ logging_enabled = nr->ctrl->passthru_err_log_enabled;
req->timeout = NVME_ADMIN_TIMEOUT;
+ }
+
+ if (!logging_enabled)
+ req->rq_flags |= RQF_QUIET;
/* passthru commands should let the driver set the SGL flags */
cmd->common.flags &= ~NVME_CMD_SGL_ALL;
@@ -687,8 +725,7 @@ void nvme_init_request(struct request *req, struct nvme_command *cmd)
if (req->mq_hctx->type == HCTX_TYPE_POLL)
req->cmd_flags |= REQ_POLLED;
nvme_clear_nvme_request(req);
- req->rq_flags |= RQF_QUIET;
- memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
+ memcpy(nr->cmd, cmd, sizeof(*cmd));
}
EXPORT_SYMBOL_GPL(nvme_init_request);
@@ -704,9 +741,11 @@ EXPORT_SYMBOL_GPL(nvme_init_request);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *rq)
{
- if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
- ctrl->state != NVME_CTRL_DELETING &&
- ctrl->state != NVME_CTRL_DEAD &&
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ if (state != NVME_CTRL_DELETING_NOIO &&
+ state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DEAD &&
!test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
@@ -715,7 +754,7 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
- bool queue_live)
+ bool queue_live, enum nvme_ctrl_state state)
{
struct nvme_request *req = nvme_req(rq);
@@ -736,7 +775,7 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
* command, which is required to set the queue live in the
* appropriate states.
*/
- switch (ctrl->state) {
+ switch (state) {
case NVME_CTRL_CONNECTING:
if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
(req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
@@ -791,8 +830,8 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
}
if (queue_max_discard_segments(req->q) == 1) {
- u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
- u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
+ u64 slba = nvme_sect_to_lba(ns->head, blk_rq_pos(req));
+ u32 nlb = blk_rq_sectors(req) >> (ns->head->lba_shift - 9);
range[0].cattr = cpu_to_le32(0);
range[0].nlb = cpu_to_le32(nlb);
@@ -800,8 +839,9 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
n = 1;
} else {
__rq_for_each_bio(bio, req) {
- u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
- u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+ u64 slba = nvme_sect_to_lba(ns->head,
+ bio->bi_iter.bi_sector);
+ u32 nlb = bio->bi_iter.bi_size >> ns->head->lba_shift;
if (n < segments) {
range[n].cattr = cpu_to_le32(0);
@@ -839,7 +879,7 @@ static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
u64 ref48;
/* both rw and write zeroes share the same reftag format */
- switch (ns->guard_type) {
+ switch (ns->head->guard_type) {
case NVME_NVM_NS_16B_GUARD:
cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
break;
@@ -867,17 +907,18 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->write_zeroes.slba =
- cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
+ cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
cmnd->write_zeroes.length =
- cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+ cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
- if (!(req->cmd_flags & REQ_NOUNMAP) && (ns->features & NVME_NS_DEAC))
+ if (!(req->cmd_flags & REQ_NOUNMAP) &&
+ (ns->head->features & NVME_NS_DEAC))
cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC);
- if (nvme_ns_has_pi(ns)) {
+ if (nvme_ns_has_pi(ns->head)) {
cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);
- switch (ns->pi_type) {
+ switch (ns->head->pi_type) {
case NVME_NS_DPS_PI_TYPE1:
case NVME_NS_DPS_PI_TYPE2:
nvme_set_ref_tag(ns, cmnd, req);
@@ -888,6 +929,36 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
return BLK_STS_OK;
}
+/*
+ * NVMe does not support a dedicated command to issue an atomic write. A write
+ * which does not adhere to the device atomic limits will silently be executed
+ * non-atomically. The request issuer should ensure that the write is within
+ * the queue's atomic write limits, but validate this here in case it is not.
+ */
+static bool nvme_valid_atomic_write(struct request *req)
+{
+ struct request_queue *q = req->q;
+ u32 boundary_bytes = queue_atomic_write_boundary_bytes(q);
+
+ if (blk_rq_bytes(req) > queue_atomic_write_unit_max_bytes(q))
+ return false;
+
+ if (boundary_bytes) {
+ u64 mask = boundary_bytes - 1, imask = ~mask;
+ u64 start = blk_rq_pos(req) << SECTOR_SHIFT;
+ u64 end = start + blk_rq_bytes(req) - 1;
+
+ /* If greater than the boundary size, it must cross a boundary */
+ if (blk_rq_bytes(req) > boundary_bytes)
+ return false;
+
+ if ((start & imask) != (end & imask))
+ return false;
+ }
+
+ return true;
+}
+
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
struct request *req, struct nvme_command *cmnd,
enum nvme_opcode op)
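The boundary check inside nvme_valid_atomic_write() above depends on the boundary being a power of two: a write stays within one boundary window iff its first and last byte agree in all bits above the mask. A small self-check of that arithmetic, assuming a 64 KiB boundary:

#include <assert.h>
#include <stdint.h>

/* start and end share a boundary window iff they agree above the
 * mask; boundary must be a power of two, as in the diff above. */
static int crosses_boundary(uint64_t start, uint64_t len, uint64_t boundary)
{
	uint64_t mask = boundary - 1, imask = ~mask;
	uint64_t end = start + len - 1;

	return (start & imask) != (end & imask);
}

int main(void)
{
	assert(!crosses_boundary(0x10000, 0x1000, 0x10000)); /* inside  */
	assert(crosses_boundary(0x1F000, 0x2000, 0x10000));  /* spans   */
	return 0;
}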
@@ -903,19 +974,24 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
if (req->cmd_flags & REQ_RAHEAD)
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+ if (req->cmd_flags & REQ_ATOMIC && !nvme_valid_atomic_write(req))
+ return BLK_STS_INVAL;
+
cmnd->rw.opcode = op;
cmnd->rw.flags = 0;
cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->rw.cdw2 = 0;
cmnd->rw.cdw3 = 0;
cmnd->rw.metadata = 0;
- cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
- cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+ cmnd->rw.slba =
+ cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
+ cmnd->rw.length =
+ cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
cmnd->rw.reftag = 0;
- cmnd->rw.apptag = 0;
- cmnd->rw.appmask = 0;
+ cmnd->rw.lbat = 0;
+ cmnd->rw.lbatm = 0;
- if (ns->ms) {
+ if (ns->head->ms) {
/*
* If formatted with metadata, the block layer always provides a
* metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else
@@ -923,12 +999,12 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
* namespace capacity to zero to prevent any I/O.
*/
if (!blk_integrity_rq(req)) {
- if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
+ if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head)))
return BLK_STS_NOTSUPP;
control |= NVME_RW_PRINFO_PRACT;
}
- switch (ns->pi_type) {
+ switch (ns->head->pi_type) {
case NVME_NS_DPS_PI_TYPE3:
control |= NVME_RW_PRINFO_PRCHK_GUARD;
break;
@@ -957,6 +1033,7 @@ void nvme_cleanup_cmd(struct request *req)
clear_bit_unlock(0, &ctrl->discard_page_busy);
else
kfree(bvec_virt(&req->special_vec));
+ req->rq_flags &= ~RQF_SPECIAL_PAYLOAD;
}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
@@ -1041,20 +1118,27 @@ EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- int qid, int at_head, blk_mq_req_flags_t flags)
+ int qid, nvme_submit_flags_t flags)
{
struct request *req;
int ret;
+ blk_mq_req_flags_t blk_flags = 0;
+ if (flags & NVME_SUBMIT_NOWAIT)
+ blk_flags |= BLK_MQ_REQ_NOWAIT;
+ if (flags & NVME_SUBMIT_RESERVED)
+ blk_flags |= BLK_MQ_REQ_RESERVED;
if (qid == NVME_QID_ANY)
- req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
+ req = blk_mq_alloc_request(q, nvme_req_op(cmd), blk_flags);
else
- req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
+ req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), blk_flags,
qid - 1);
if (IS_ERR(req))
return PTR_ERR(req);
nvme_init_request(req, cmd);
+ if (flags & NVME_SUBMIT_RETRY)
+ req->cmd_flags &= ~REQ_FAILFAST_DRIVER;
if (buffer && bufflen) {
ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -1062,7 +1146,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out;
}
- ret = nvme_execute_rq(req, at_head);
+ ret = nvme_execute_rq(req, flags & NVME_SUBMIT_AT_HEAD);
if (result && ret >= 0)
*result = nvme_req(req)->result;
out:
@@ -1075,7 +1159,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -1098,6 +1182,10 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
} else {
effects = le32_to_cpu(ctrl->effects->acs[opcode]);
+
+ /* Ignore execution restrictions if any relaxation bits are set */
+ if (effects & NVME_CMD_EFFECTS_CSER_MASK)
+ effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
}
return effects;
@@ -1171,7 +1259,7 @@ EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
/*
* Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
- *
+ *
* The host should send Keep Alive commands at half of the Keep Alive Timeout
* accounting for transport roundtrip times [..].
*/
@@ -1192,8 +1280,16 @@ static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
{
- queue_delayed_work(nvme_wq, &ctrl->ka_work,
- nvme_keep_alive_work_period(ctrl));
+ unsigned long now = jiffies;
+ unsigned long delay = nvme_keep_alive_work_period(ctrl);
+ unsigned long ka_next_check_tm = ctrl->ka_last_check_time + delay;
+
+ if (time_after(now, ka_next_check_tm))
+ delay = 0;
+ else
+ delay = ka_next_check_tm - now;
+
+ queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
}
static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
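The rescheduling change above anchors the next keep-alive to the last successful check rather than to "now", so a long-blocked work item fires immediately instead of letting the deadline drift. A plain-integer model of the computation (time_after() is approximated with a simple comparison, ignoring jiffies wraparound):

#include <stdio.h>

/* Next check is due at last_check + period; if already past it,
 * run immediately (delay 0), else wait out the remaining time. */
static unsigned long ka_delay(unsigned long now, unsigned long last_check,
			      unsigned long period)
{
	unsigned long next = last_check + period;

	return (now >= next) ? 0 : next - now;
}

int main(void)
{
	printf("%lu\n", ka_delay(1000, 900, 50));	/* overdue -> 0 */
	printf("%lu\n", ka_delay(1000, 990, 50));	/* -> 40 */
	return 0;
}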
@@ -1331,8 +1427,10 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
sizeof(struct nvme_id_ctrl));
- if (error)
+ if (error) {
kfree(*id);
+ *id = NULL;
+ }
return error;
}
@@ -1442,7 +1540,7 @@ free_data:
return status;
}
-static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
struct nvme_id_ns **id)
{
struct nvme_command c = { };
@@ -1461,6 +1559,7 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
if (error) {
dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
kfree(*id);
+ *id = NULL;
}
return error;
}
@@ -1479,7 +1578,8 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
if (id->ncap == 0) {
/* namespace not allocated or attached */
info->is_removed = true;
- return -ENODEV;
+ ret = -ENODEV;
+ goto error;
}
info->anagrpid = id->anagrpid;
@@ -1497,8 +1597,10 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
}
+
+error:
kfree(id);
- return 0;
+ return ret;
}
static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
@@ -1539,7 +1641,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
c.features.dword11 = cpu_to_le32(dword11);
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
- buffer, buflen, NVME_QID_ANY, 0, 0);
+ buffer, buflen, NVME_QID_ANY, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
@@ -1657,92 +1759,87 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-#ifdef CONFIG_BLK_DEV_INTEGRITY
-static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
- u32 max_integrity_segments)
+static bool nvme_init_integrity(struct nvme_ns_head *head,
+ struct queue_limits *lim, struct nvme_ns_info *info)
{
- struct blk_integrity integrity = { };
+ struct blk_integrity *bi = &lim->integrity;
+
+ memset(bi, 0, sizeof(*bi));
+
+ if (!head->ms)
+ return true;
+
+ /*
+ * PI can always be supported as we can ask the controller to simply
+ * insert/strip it, which is not possible for other kinds of metadata.
+ */
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) ||
+ !(head->features & NVME_NS_METADATA_SUPPORTED))
+ return nvme_ns_has_pi(head);
- switch (ns->pi_type) {
+ switch (head->pi_type) {
case NVME_NS_DPS_PI_TYPE3:
- switch (ns->guard_type) {
+ switch (head->guard_type) {
case NVME_NVM_NS_16B_GUARD:
- integrity.profile = &t10_pi_type3_crc;
- integrity.tag_size = sizeof(u16) + sizeof(u32);
- integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ bi->csum_type = BLK_INTEGRITY_CSUM_CRC;
+ bi->tag_size = sizeof(u16) + sizeof(u32);
+ bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
break;
case NVME_NVM_NS_64B_GUARD:
- integrity.profile = &ext_pi_type3_crc64;
- integrity.tag_size = sizeof(u16) + 6;
- integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ bi->csum_type = BLK_INTEGRITY_CSUM_CRC64;
+ bi->tag_size = sizeof(u16) + 6;
+ bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
break;
default:
- integrity.profile = NULL;
break;
}
break;
case NVME_NS_DPS_PI_TYPE1:
case NVME_NS_DPS_PI_TYPE2:
- switch (ns->guard_type) {
+ switch (head->guard_type) {
case NVME_NVM_NS_16B_GUARD:
- integrity.profile = &t10_pi_type1_crc;
- integrity.tag_size = sizeof(u16);
- integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ bi->csum_type = BLK_INTEGRITY_CSUM_CRC;
+ bi->tag_size = sizeof(u16);
+ bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE |
+ BLK_INTEGRITY_REF_TAG;
break;
case NVME_NVM_NS_64B_GUARD:
- integrity.profile = &ext_pi_type1_crc64;
- integrity.tag_size = sizeof(u16);
- integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ bi->csum_type = BLK_INTEGRITY_CSUM_CRC64;
+ bi->tag_size = sizeof(u16);
+ bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE |
+ BLK_INTEGRITY_REF_TAG;
break;
default:
- integrity.profile = NULL;
break;
}
break;
default:
- integrity.profile = NULL;
break;
}
- integrity.tuple_size = ns->ms;
- blk_integrity_register(disk, &integrity);
- blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
-}
-#else
-static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
- u32 max_integrity_segments)
-{
+ bi->tuple_size = head->ms;
+ bi->pi_offset = info->pi_offset;
+ return true;
}
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
+static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim)
{
struct nvme_ctrl *ctrl = ns->ctrl;
- struct request_queue *queue = disk->queue;
- u32 size = queue_logical_block_size(queue);
-
- if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
- ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
- if (ctrl->max_discard_sectors == 0) {
- blk_queue_max_discard_sectors(queue, 0);
- return;
- }
-
- BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
- NVME_DSM_MAX_RANGES);
-
- queue->limits.discard_granularity = size;
-
- /* If discard is already enabled, don't reset queue limits */
- if (queue->limits.max_discard_sectors)
- return;
+ if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
+ lim->max_hw_discard_sectors =
+ nvme_lba_to_sect(ns->head, ctrl->dmrsl);
+ else if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
+ lim->max_hw_discard_sectors = UINT_MAX;
+ else
+ lim->max_hw_discard_sectors = 0;
- blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
- blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
+ lim->discard_granularity = lim->logical_block_size;
- if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
- blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
+ if (ctrl->dmrl)
+ lim->max_discard_segments = ctrl->dmrl;
+ else
+ lim->max_discard_segments = NVME_DSM_MAX_RANGES;
}
static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
@@ -1753,76 +1850,80 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
a->csi == b->csi;
}
-static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
+static int nvme_identify_ns_nvm(struct nvme_ctrl *ctrl, unsigned int nsid,
+ struct nvme_id_ns_nvm **nvmp)
{
- bool first = id->dps & NVME_NS_DPS_PI_FIRST;
- unsigned lbaf = nvme_lbaf_index(id->flbas);
- struct nvme_ctrl *ctrl = ns->ctrl;
- struct nvme_command c = { };
+ struct nvme_command c = {
+ .identify.opcode = nvme_admin_identify,
+ .identify.nsid = cpu_to_le32(nsid),
+ .identify.cns = NVME_ID_CNS_CS_NS,
+ .identify.csi = NVME_CSI_NVM,
+ };
struct nvme_id_ns_nvm *nvm;
- int ret = 0;
- u32 elbaf;
-
- ns->pi_size = 0;
- ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
- if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
- ns->pi_size = sizeof(struct t10_pi_tuple);
- ns->guard_type = NVME_NVM_NS_16B_GUARD;
- goto set_pi;
- }
+ int ret;
nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
if (!nvm)
return -ENOMEM;
- c.identify.opcode = nvme_admin_identify;
- c.identify.nsid = cpu_to_le32(ns->head->ns_id);
- c.identify.cns = NVME_ID_CNS_CS_NS;
- c.identify.csi = NVME_CSI_NVM;
-
- ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm));
+ ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, nvm, sizeof(*nvm));
if (ret)
- goto free_data;
+ kfree(nvm);
+ else
+ *nvmp = nvm;
+ return ret;
+}
- elbaf = le32_to_cpu(nvm->elbaf[lbaf]);
+static void nvme_configure_pi_elbas(struct nvme_ns_head *head,
+ struct nvme_id_ns *id, struct nvme_id_ns_nvm *nvm)
+{
+ u32 elbaf = le32_to_cpu(nvm->elbaf[nvme_lbaf_index(id->flbas)]);
+ u8 guard_type;
/* no support for storage tag formats right now */
if (nvme_elbaf_sts(elbaf))
- goto free_data;
+ return;
- ns->guard_type = nvme_elbaf_guard_type(elbaf);
- switch (ns->guard_type) {
+ guard_type = nvme_elbaf_guard_type(elbaf);
+ if ((nvm->pic & NVME_ID_NS_NVM_QPIFS) &&
+ guard_type == NVME_NVM_NS_QTYPE_GUARD)
+ guard_type = nvme_elbaf_qualified_guard_type(elbaf);
+
+ head->guard_type = guard_type;
+ switch (head->guard_type) {
case NVME_NVM_NS_64B_GUARD:
- ns->pi_size = sizeof(struct crc64_pi_tuple);
+ head->pi_size = sizeof(struct crc64_pi_tuple);
break;
case NVME_NVM_NS_16B_GUARD:
- ns->pi_size = sizeof(struct t10_pi_tuple);
+ head->pi_size = sizeof(struct t10_pi_tuple);
break;
default:
break;
}
-
-free_data:
- kfree(nvm);
-set_pi:
- if (ns->pi_size && (first || ns->ms == ns->pi_size))
- ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
- else
- ns->pi_type = 0;
-
- return ret;
}
-static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+static void nvme_configure_metadata(struct nvme_ctrl *ctrl,
+ struct nvme_ns_head *head, struct nvme_id_ns *id,
+ struct nvme_id_ns_nvm *nvm, struct nvme_ns_info *info)
{
- struct nvme_ctrl *ctrl = ns->ctrl;
-
- if (nvme_init_ms(ns, id))
+ head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+ head->pi_type = 0;
+ head->pi_size = 0;
+ head->ms = le16_to_cpu(id->lbaf[nvme_lbaf_index(id->flbas)].ms);
+ if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
return;
- ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
- if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
- return;
+ if (nvm && (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
+ nvme_configure_pi_elbas(head, id, nvm);
+ } else {
+ head->pi_size = sizeof(struct t10_pi_tuple);
+ head->guard_type = NVME_NVM_NS_16B_GUARD;
+ }
+
+ if (head->pi_size && head->ms >= head->pi_size)
+ head->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+ if (!(id->dps & NVME_NS_DPS_PI_FIRST))
+ info->pi_offset = head->ms - head->pi_size;
if (ctrl->ops->flags & NVME_F_FABRICS) {
/*
@@ -1833,7 +1934,7 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
return;
- ns->features |= NVME_NS_EXT_LBAS;
+ head->features |= NVME_NS_EXT_LBAS;
/*
* The current fabrics transport drivers support namespace
@@ -1844,8 +1945,8 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
* Note, this check will need to be modified if any drivers
* gain the ability to use other metadata formats.
*/
- if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
- ns->features |= NVME_NS_METADATA_SUPPORTED;
+ if (ctrl->max_integrity_segments && nvme_ns_has_pi(head))
+ head->features |= NVME_NS_METADATA_SUPPORTED;
} else {
/*
* For PCIe controllers, we can't easily remap the separate
@@ -1854,48 +1955,64 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
* We allow extended LBAs for the passthrough interface, though.
*/
if (id->flbas & NVME_NS_FLBAS_META_EXT)
- ns->features |= NVME_NS_EXT_LBAS;
+ head->features |= NVME_NS_EXT_LBAS;
else
- ns->features |= NVME_NS_METADATA_SUPPORTED;
+ head->features |= NVME_NS_METADATA_SUPPORTED;
}
}
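/*
 * Worked example (assumed format, not from the patch): a namespace with
 * ms = 16 metadata bytes per block and a 16b guard, so
 * pi_size = sizeof(struct t10_pi_tuple) = 8.  Since ms >= pi_size,
 * pi_type is taken from id->dps.  With DPS bit 3 (PI_FIRST) clear, the
 * PI tuple sits at the end of the metadata, giving
 * info->pi_offset = 16 - 8 = 8.
 */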
-static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
- struct request_queue *q)
-{
- bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;
- if (ctrl->max_hw_sectors) {
- u32 max_segments =
- (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
+static void nvme_update_atomic_write_disk_info(struct nvme_ns *ns,
+ struct nvme_id_ns *id, struct queue_limits *lim,
+ u32 bs, u32 atomic_bs)
+{
+ unsigned int boundary = 0;
- max_segments = min_not_zero(max_segments, ctrl->max_segments);
- blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
- blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
+ if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) {
+ if (le16_to_cpu(id->nabspf))
+ boundary = (le16_to_cpu(id->nabspf) + 1) * bs;
}
- blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
- blk_queue_dma_alignment(q, 3);
- blk_queue_write_cache(q, vwc, vwc);
+ lim->atomic_write_hw_max = atomic_bs;
+ lim->atomic_write_hw_boundary = boundary;
+ lim->atomic_write_hw_unit_min = bs;
+ lim->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs);
+}
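/*
 * Worked example (assumed Identify values, not from the patch): with a
 * 4 KiB logical block (bs = 4096), NAWUPF = 7 and NABSPF = 15:
 *
 *   atomic_bs = (1 + 7) * 4096              = 32768 bytes
 *   boundary  = (15 + 1) * 4096             = 65536 bytes
 *   unit_max  = rounddown_pow_of_two(32768) = 32768 bytes
 *   unit_min  = bs                          = 4096 bytes
 *
 * i.e. the device completes writes of up to 32 KiB atomically provided
 * they do not cross a 64 KiB boundary.
 */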
+
+static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
+{
+ return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1;
+}
+
+static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl,
+ struct queue_limits *lim)
+{
+ lim->max_hw_sectors = ctrl->max_hw_sectors;
+ lim->max_segments = min_t(u32, USHRT_MAX,
+ min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments));
+ lim->max_integrity_segments = ctrl->max_integrity_segments;
+ lim->virt_boundary_mask = NVME_CTRL_PAGE_SIZE - 1;
+ lim->max_segment_size = UINT_MAX;
+ lim->dma_alignment = 3;
}
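/*
 * Worked example (assumed values): with NVME_CTRL_PAGE_SIZE = 4 KiB one
 * controller page covers 8 sectors, so max_hw_sectors = 2048 (a 1 MiB
 * transfer) needs at most 2048 / 8 + 1 = 257 segments, the "+ 1"
 * covering an unaligned first page.  The result is then clamped by
 * ctrl->max_segments (if set) and USHRT_MAX.
 */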
-static void nvme_update_disk_info(struct gendisk *disk,
- struct nvme_ns *ns, struct nvme_id_ns *id)
+static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
+ struct queue_limits *lim)
{
- sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
- u32 bs = 1U << ns->lba_shift;
+ struct nvme_ns_head *head = ns->head;
+ u32 bs = 1U << head->lba_shift;
u32 atomic_bs, phys_bs, io_opt = 0;
+ bool valid = true;
/*
* The block layer can't support LBA sizes larger than the page size
- * yet, so catch this early and don't allow block I/O.
+ * or smaller than a sector size yet, so catch this early and don't
+ * allow block I/O.
*/
- if (ns->lba_shift > PAGE_SHIFT) {
- capacity = 0;
+ if (head->lba_shift > PAGE_SHIFT || head->lba_shift < SECTOR_SHIFT) {
bs = (1 << 9);
+ valid = false;
}
- blk_integrity_unregister(disk);
-
atomic_bs = phys_bs = bs;
if (id->nabo == 0) {
/*
@@ -1907,45 +2024,32 @@ static void nvme_update_disk_info(struct gendisk *disk,
atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
else
atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
+
+ nvme_update_atomic_write_disk_info(ns, id, lim, bs, atomic_bs);
}
if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
/* NPWG = Namespace Preferred Write Granularity */
phys_bs = bs * (1 + le16_to_cpu(id->npwg));
/* NOWS = Namespace Optimal Write Size */
- io_opt = bs * (1 + le16_to_cpu(id->nows));
+ if (id->nows)
+ io_opt = bs * (1 + le16_to_cpu(id->nows));
}
- blk_queue_logical_block_size(disk->queue, bs);
/*
* Linux filesystems assume writing a single physical block is
* an atomic operation. Hence limit the physical block size to the
* value of the Atomic Write Unit Power Fail parameter.
*/
- blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
- blk_queue_io_min(disk->queue, phys_bs);
- blk_queue_io_opt(disk->queue, io_opt);
-
- /*
- * Register a metadata profile for PI, or the plain non-integrity NVMe
- * metadata masquerading as Type 0 if supported, otherwise reject block
- * I/O to namespaces with metadata except when the namespace supports
- * PI, as it can strip/insert in that case.
- */
- if (ns->ms) {
- if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
- (ns->features & NVME_NS_METADATA_SUPPORTED))
- nvme_init_integrity(disk, ns,
- ns->ctrl->max_integrity_segments);
- else if (!nvme_ns_has_pi(ns))
- capacity = 0;
- }
-
- set_capacity_and_notify(disk, capacity);
-
- nvme_config_discard(disk, ns);
- blk_queue_max_write_zeroes_sectors(disk->queue,
- ns->ctrl->max_zeroes_sectors);
+ lim->logical_block_size = bs;
+ lim->physical_block_size = min(phys_bs, atomic_bs);
+ lim->io_min = phys_bs;
+ lim->io_opt = io_opt;
+ if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
+ lim->max_write_zeroes_sectors = UINT_MAX;
+ else
+ lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors;
+ return valid;
}
static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
@@ -1959,7 +2063,8 @@ static inline bool nvme_first_scan(struct gendisk *disk)
return !disk_live(disk);
}
-static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
+static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id,
+ struct queue_limits *lim)
{
struct nvme_ctrl *ctrl = ns->ctrl;
u32 iob;
@@ -1968,7 +2073,7 @@ static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
is_power_of_2(ctrl->max_hw_sectors))
iob = ctrl->max_hw_sectors;
else
- iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
+ iob = nvme_lba_to_sect(ns->head, le16_to_cpu(id->noiob));
if (!iob)
return;
@@ -1987,38 +2092,36 @@ static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
return;
}
- blk_queue_chunk_sectors(ns->queue, iob);
+ lim->chunk_sectors = iob;
}
static int nvme_update_ns_info_generic(struct nvme_ns *ns,
struct nvme_ns_info *info)
{
+ struct queue_limits lim;
+ int ret;
+
blk_mq_freeze_queue(ns->disk->queue);
- nvme_set_queue_limits(ns->ctrl, ns->queue);
+ lim = queue_limits_start_update(ns->disk->queue);
+ nvme_set_ctrl_limits(ns->ctrl, &lim);
+ ret = queue_limits_commit_update(ns->disk->queue, &lim);
set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
blk_mq_unfreeze_queue(ns->disk->queue);
- if (nvme_ns_head_multipath(ns->head)) {
- blk_mq_freeze_queue(ns->head->disk->queue);
- set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
- nvme_mpath_revalidate_paths(ns);
- blk_stack_limits(&ns->head->disk->queue->limits,
- &ns->queue->limits, 0);
- ns->head->disk->flags |= GENHD_FL_HIDDEN;
- blk_mq_unfreeze_queue(ns->head->disk->queue);
- }
-
/* Hide the block-interface for these devices */
- ns->disk->flags |= GENHD_FL_HIDDEN;
- set_bit(NVME_NS_READY, &ns->flags);
-
- return 0;
+ if (!ret)
+ ret = -ENODEV;
+ return ret;
}
static int nvme_update_ns_info_block(struct nvme_ns *ns,
struct nvme_ns_info *info)
{
+ struct queue_limits lim;
+ struct nvme_id_ns_nvm *nvm = NULL;
+ struct nvme_zone_info zi = {};
struct nvme_id_ns *id;
+ sector_t capacity;
unsigned lbaf;
int ret;
@@ -2026,23 +2129,65 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
if (ret)
return ret;
- blk_mq_freeze_queue(ns->disk->queue);
+ if (id->ncap == 0) {
+ /* namespace not allocated or attached */
+ info->is_removed = true;
+ ret = -ENXIO;
+ goto out;
+ }
lbaf = nvme_lbaf_index(id->flbas);
- ns->lba_shift = id->lbaf[lbaf].ds;
- nvme_set_queue_limits(ns->ctrl, ns->queue);
- nvme_configure_metadata(ns, id);
- nvme_set_chunk_sectors(ns, id);
- nvme_update_disk_info(ns->disk, ns, id);
+ if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) {
+ ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm);
+ if (ret < 0)
+ goto out;
+ }
- if (ns->head->ids.csi == NVME_CSI_ZNS) {
- ret = nvme_update_zone_info(ns, lbaf);
- if (ret) {
- blk_mq_unfreeze_queue(ns->disk->queue);
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ ns->head->ids.csi == NVME_CSI_ZNS) {
+ ret = nvme_query_zone_info(ns, lbaf, &zi);
+ if (ret < 0)
goto out;
- }
}
+ blk_mq_freeze_queue(ns->disk->queue);
+ ns->head->lba_shift = id->lbaf[lbaf].ds;
+ ns->head->nuse = le64_to_cpu(id->nuse);
+ capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
+
+ lim = queue_limits_start_update(ns->disk->queue);
+ nvme_set_ctrl_limits(ns->ctrl, &lim);
+ nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info);
+ nvme_set_chunk_sectors(ns, id, &lim);
+ if (!nvme_update_disk_info(ns, id, &lim))
+ capacity = 0;
+ nvme_config_discard(ns, &lim);
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ ns->head->ids.csi == NVME_CSI_ZNS)
+ nvme_update_zone_info(ns, &lim, &zi);
+
+ if (ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+ lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
+ else
+ lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
+
+ /*
+ * Register a metadata profile for PI, or the plain non-integrity NVMe
+ * metadata masquerading as Type 0 if supported, otherwise reject block
+ * I/O to namespaces with metadata except when the namespace supports
+ * PI, as it can strip/insert in that case.
+ */
+ if (!nvme_init_integrity(ns->head, &lim, info))
+ capacity = 0;
+
+ ret = queue_limits_commit_update(ns->disk->queue, &lim);
+ if (ret) {
+ blk_mq_unfreeze_queue(ns->disk->queue);
+ goto out;
+ }
+
+ set_capacity_and_notify(ns->disk, capacity);
+
/*
* Only set the DEAC bit if the device guarantees that reads from
* deallocated data return zeroes. While the DEAC bit does not
@@ -2050,62 +2195,129 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
* do not return zeroes.
*/
if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3)))
- ns->features |= NVME_NS_DEAC;
+ ns->head->features |= NVME_NS_DEAC;
set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
set_bit(NVME_NS_READY, &ns->flags);
blk_mq_unfreeze_queue(ns->disk->queue);
if (blk_queue_is_zoned(ns->queue)) {
- ret = nvme_revalidate_zones(ns);
+ ret = blk_revalidate_disk_zones(ns->disk);
if (ret && !nvme_first_scan(ns->disk))
goto out;
}
- if (nvme_ns_head_multipath(ns->head)) {
- blk_mq_freeze_queue(ns->head->disk->queue);
- nvme_update_disk_info(ns->head->disk, ns, id);
- set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
- nvme_mpath_revalidate_paths(ns);
- blk_stack_limits(&ns->head->disk->queue->limits,
- &ns->queue->limits, 0);
- disk_update_readahead(ns->head->disk);
- blk_mq_unfreeze_queue(ns->head->disk->queue);
- }
-
ret = 0;
out:
- /*
- * If probing fails due an unsupported feature, hide the block device,
- * but still allow other access.
- */
- if (ret == -ENODEV) {
- ns->disk->flags |= GENHD_FL_HIDDEN;
- set_bit(NVME_NS_READY, &ns->flags);
- ret = 0;
- }
+ kfree(nvm);
kfree(id);
return ret;
}
static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
{
+ bool unsupported = false;
+ int ret;
+
switch (info->ids.csi) {
case NVME_CSI_ZNS:
if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
dev_info(ns->ctrl->device,
"block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
info->nsid);
- return nvme_update_ns_info_generic(ns, info);
+ ret = nvme_update_ns_info_generic(ns, info);
+ break;
}
- return nvme_update_ns_info_block(ns, info);
+ ret = nvme_update_ns_info_block(ns, info);
+ break;
case NVME_CSI_NVM:
- return nvme_update_ns_info_block(ns, info);
+ ret = nvme_update_ns_info_block(ns, info);
+ break;
default:
dev_info(ns->ctrl->device,
"block device for nsid %u not supported (csi %u)\n",
info->nsid, info->ids.csi);
- return nvme_update_ns_info_generic(ns, info);
+ ret = nvme_update_ns_info_generic(ns, info);
+ break;
+ }
+
+ /*
+	 * If probing fails due to an unsupported feature, hide the block device,
+ * but still allow other access.
+ */
+ if (ret == -ENODEV) {
+ ns->disk->flags |= GENHD_FL_HIDDEN;
+ set_bit(NVME_NS_READY, &ns->flags);
+ unsupported = true;
+ ret = 0;
+ }
+
+ if (!ret && nvme_ns_head_multipath(ns->head)) {
+ struct queue_limits *ns_lim = &ns->disk->queue->limits;
+ struct queue_limits lim;
+
+ blk_mq_freeze_queue(ns->head->disk->queue);
+		/*
+		 * queue_limits mixes values that are hardware limitations
+		 * for bio splitting with values that describe the device
+		 * configuration.
+		 *
+		 * For NVMe the device configuration can change after e.g. a
+		 * Format command, and we really want to pick up the new format
+		 * value here. But we must still stack the queue limits to the
+		 * least common denominator for multipathing to split the bios
+		 * properly.
+		 *
+		 * To work around this, we explicitly set the device
+		 * configuration to the values we just queried, but stack only
+		 * the splitting limits to make sure we still obey possibly
+		 * lower limitations of other controllers.
+		 */
+ lim = queue_limits_start_update(ns->head->disk->queue);
+ lim.logical_block_size = ns_lim->logical_block_size;
+ lim.physical_block_size = ns_lim->physical_block_size;
+ lim.io_min = ns_lim->io_min;
+ lim.io_opt = ns_lim->io_opt;
+ queue_limits_stack_bdev(&lim, ns->disk->part0, 0,
+ ns->head->disk->disk_name);
+ if (unsupported)
+ ns->head->disk->flags |= GENHD_FL_HIDDEN;
+ else
+ nvme_init_integrity(ns->head, &lim, info);
+ ret = queue_limits_commit_update(ns->head->disk->queue, &lim);
+
+ set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk));
+ set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
+ nvme_mpath_revalidate_paths(ns);
+
+ blk_mq_unfreeze_queue(ns->head->disk->queue);
}
+
+ return ret;
+}
+
+int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16],
+ enum blk_unique_id type)
+{
+ struct nvme_ns_ids *ids = &ns->head->ids;
+
+ if (type != BLK_UID_EUI64)
+ return -EINVAL;
+
+ if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) {
+ memcpy(id, &ids->nguid, sizeof(ids->nguid));
+ return sizeof(ids->nguid);
+ }
+ if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) {
+ memcpy(id, &ids->eui64, sizeof(ids->eui64));
+ return sizeof(ids->eui64);
+ }
+
+ return -EINVAL;
+}
+
+static int nvme_get_unique_id(struct gendisk *disk, u8 id[16],
+ enum blk_unique_id type)
+{
+ return nvme_ns_get_unique_id(disk->private_data, id, type);
}
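/*
 * Sketch (hypothetical caller, not part of the patch): exercising the
 * ->get_unique_id() hook registered below.  A zero-filled NGUID/EUI64
 * means "not set", hence the memchr_inv() checks in
 * nvme_ns_get_unique_id() above.
 */
static int example_read_unique_id(struct gendisk *disk)
{
	u8 id[16] = { };

	if (!disk->fops->get_unique_id)
		return -EOPNOTSUPP;
	return disk->fops->get_unique_id(disk, id, BLK_UID_EUI64);
}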
#ifdef CONFIG_BLK_SED_OPAL
@@ -2124,7 +2336,7 @@ static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t l
cmd.common.cdw11 = cpu_to_le32(len);
return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
- NVME_QID_ANY, 1, 0);
+ NVME_QID_ANY, NVME_SUBMIT_AT_HEAD);
}
static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
@@ -2163,6 +2375,7 @@ const struct block_device_operations nvme_bdev_ops = {
.open = nvme_open,
.release = nvme_release,
.getgeo = nvme_getgeo,
+ .get_unique_id = nvme_get_unique_id,
.report_zones = nvme_report_zones,
.pr_ops = &nvme_pr_ops,
};
@@ -2245,25 +2458,8 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
else
ctrl->ctrl_config = NVME_CC_CSS_NVM;
- if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
- u32 crto;
-
- ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
- if (ret) {
- dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
- ret);
- return ret;
- }
-
- if (ctrl->cap & NVME_CAP_CRMS_CRIMS) {
- ctrl->ctrl_config |= NVME_CC_CRIME;
- timeout = NVME_CRTO_CRIMT(crto);
- } else {
- timeout = NVME_CRTO_CRWMT(crto);
- }
- } else {
- timeout = NVME_CAP_TIMEOUT(ctrl->cap);
- }
+ if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS)
+ ctrl->ctrl_config |= NVME_CC_CRIME;
ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
@@ -2272,11 +2468,39 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
if (ret)
return ret;
- /* Flush write to device (required if transport is PCI) */
- ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config);
+ /* CAP value may change after initial CC write */
+ ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
if (ret)
return ret;
+ timeout = NVME_CAP_TIMEOUT(ctrl->cap);
+ if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
+ u32 crto, ready_timeout;
+
+ ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
+ if (ret) {
+ dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
+ ret);
+ return ret;
+ }
+
+	/*
+	 * CRTO should always be greater than or equal to CAP.TO, but
+	 * some devices are known to get this wrong. Use the larger of
+	 * the two values.
+	 */
+ if (ctrl->ctrl_config & NVME_CC_CRIME)
+ ready_timeout = NVME_CRTO_CRIMT(crto);
+ else
+ ready_timeout = NVME_CRTO_CRWMT(crto);
+
+ if (ready_timeout < timeout)
+ dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
+ crto, ctrl->cap);
+ else
+ timeout = ready_timeout;
+ }
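	/*
	 * Worked example (assumed register values, not from the patch):
	 * CAP.TO = 30 and CRTO.CRWMT = 20, both in 500 ms units, give a
	 * 15 s CAP timeout and a 10 s CRTO timeout.  CRTO is smaller
	 * than CAP.TO here, so the warning above fires and the driver
	 * keeps the larger CAP.TO-derived 15 s value.
	 */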
+
ctrl->ctrl_config |= NVME_CC_ENABLE;
ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
if (ret)
@@ -2506,7 +2730,7 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val)
if (ctrl->ps_max_latency_us != latency) {
ctrl->ps_max_latency_us = latency;
- if (ctrl->state == NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
nvme_configure_apst(ctrl);
}
}
@@ -2764,7 +2988,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
subsys->awupf = le16_to_cpu(id->awupf);
nvme_mpath_default_iopolicy(subsys);
- subsys->dev.class = nvme_subsys_class;
+ subsys->dev.class = &nvme_subsys_class;
subsys->dev.release = nvme_release_subsystem;
subsys->dev.groups = nvme_subsys_attrs_groups;
dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
@@ -2874,14 +3098,6 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
struct nvme_id_ctrl_nvm *id;
int ret;
- if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
- ctrl->max_discard_sectors = UINT_MAX;
- ctrl->max_discard_segments = NVME_DSM_MAX_RANGES;
- } else {
- ctrl->max_discard_sectors = 0;
- ctrl->max_discard_segments = 0;
- }
-
/*
	 * Even though the NVMe spec explicitly states that MDTS is not applicable
* to the write-zeroes, we are cautious and limit the size to the
@@ -2911,8 +3127,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
if (ret)
goto free_data;
- if (id->dmrl)
- ctrl->max_discard_segments = id->dmrl;
+ ctrl->dmrl = id->dmrl;
ctrl->dmrsl = le32_to_cpu(id->dmrsl);
if (id->wzsl)
ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
@@ -2980,8 +3195,50 @@ static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
return 0;
}
+static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+{
+ /*
+ * In fabrics we need to verify the cntlid matches the
+ * admin connect
+ */
+ if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
+ dev_err(ctrl->device,
+ "Mismatching cntlid: Connect %u vs Identify %u, rejecting\n",
+ ctrl->cntlid, le16_to_cpu(id->cntlid));
+ return -EINVAL;
+ }
+
+ if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
+ dev_err(ctrl->device,
+ "keep-alive support is mandatory for fabrics\n");
+ return -EINVAL;
+ }
+
+ if (!nvme_discovery_ctrl(ctrl) && ctrl->ioccsz < 4) {
+ dev_err(ctrl->device,
+ "I/O queue command capsule supported size %d < 4\n",
+ ctrl->ioccsz);
+ return -EINVAL;
+ }
+
+ if (!nvme_discovery_ctrl(ctrl) && ctrl->iorcsz < 1) {
+ dev_err(ctrl->device,
+ "I/O queue response capsule supported size %d < 1\n",
+ ctrl->iorcsz);
+ return -EINVAL;
+ }
+
+ if (!ctrl->maxcmd) {
+ dev_err(ctrl->device, "Maximum outstanding commands is 0\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int nvme_init_identify(struct nvme_ctrl *ctrl)
{
+ struct queue_limits lim;
struct nvme_id_ctrl *id;
u32 max_hw_sectors;
bool prev_apst_enabled;
@@ -3048,7 +3305,12 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->max_hw_sectors =
min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
- nvme_set_queue_limits(ctrl, ctrl->admin_q);
+ lim = queue_limits_start_update(ctrl->admin_q);
+ nvme_set_ctrl_limits(ctrl, &lim);
+ ret = queue_limits_commit_update(ctrl->admin_q, &lim);
+ if (ret)
+ goto out_free;
+
ctrl->sgls = le32_to_cpu(id->sgls);
ctrl->kas = le16_to_cpu(id->kas);
ctrl->max_namespaces = le32_to_cpu(id->mnan);
@@ -3066,7 +3328,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
if (ctrl->shutdown_timeout != shutdown_timeout)
dev_info(ctrl->device,
- "Shutdown timeout set to %u seconds\n",
+ "D3 entry latency set to %u seconds\n",
ctrl->shutdown_timeout);
} else
ctrl->shutdown_timeout = shutdown_timeout;
@@ -3092,25 +3354,9 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->iorcsz = le32_to_cpu(id->iorcsz);
ctrl->maxcmd = le16_to_cpu(id->maxcmd);
- /*
- * In fabrics we need to verify the cntlid matches the
- * admin connect
- */
- if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
- dev_err(ctrl->device,
- "Mismatching cntlid: Connect %u vs Identify "
- "%u, rejecting\n",
- ctrl->cntlid, le16_to_cpu(id->cntlid));
- ret = -EINVAL;
- goto out_free;
- }
-
- if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
- dev_err(ctrl->device,
- "keep-alive support is mandatory for fabrics\n");
- ret = -EINVAL;
+ ret = nvme_check_ctrl_fabric_info(ctrl, id);
+ if (ret)
goto out_free;
- }
} else {
ctrl->hmpre = le32_to_cpu(id->hmpre);
ctrl->hmmin = le32_to_cpu(id->hmmin);
@@ -3183,6 +3429,8 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended)
clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags);
ctrl->identified = true;
+ nvme_start_keep_alive(ctrl);
+
return 0;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish);
@@ -3192,7 +3440,7 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
struct nvme_ctrl *ctrl =
container_of(inode->i_cdev, struct nvme_ctrl, cdev);
- switch (ctrl->state) {
+ switch (nvme_ctrl_state(ctrl)) {
case NVME_CTRL_LIVE:
break;
default:
@@ -3294,7 +3542,7 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
if (minor < 0)
return minor;
cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
- cdev_device->class = nvme_ns_chr_class;
+ cdev_device->class = &nvme_ns_chr_class;
cdev_device->release = nvme_cdev_rel;
device_initialize(cdev_device);
cdev_init(cdev, fops);
@@ -3367,6 +3615,8 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
head->ns_id = info->nsid;
head->ids = info->ids;
head->shared = info->is_shared;
+ ratelimit_state_init(&head->rs_nuse, 5 * HZ, 1);
+ ratelimit_set_flags(&head->rs_nuse, RATELIMIT_MSG_ON_RELEASE);
kref_init(&head->ref);
if (head->ids.csi) {
@@ -3502,7 +3752,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
"Found shared namespace %d, but multipathing not supported.\n",
info->nsid);
dev_warn_once(ctrl->device,
- "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
+ "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n");
}
}
@@ -3521,9 +3771,10 @@ out_unlock:
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns *ns, *ret = NULL;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
if (ns->head->ns_id == nsid) {
if (!nvme_get_ns(ns))
continue;
@@ -3533,7 +3784,7 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (ns->head->ns_id > nsid)
break;
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
return ret;
}
EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
@@ -3547,7 +3798,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
if (tmp->head->ns_id < ns->head->ns_id) {
- list_add(&ns->list, &tmp->list);
+ list_add_rcu(&ns->list, &tmp->list);
return;
}
}
@@ -3556,6 +3807,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
{
+ struct queue_limits lim = { };
struct nvme_ns *ns;
struct gendisk *disk;
int node = ctrl->numa_node;
@@ -3564,7 +3816,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
if (!ns)
return;
- disk = blk_mq_alloc_disk(ctrl->tagset, ns);
+ if (ctrl->opts && ctrl->opts->data_digest)
+ lim.features |= BLK_FEAT_STABLE_WRITES;
+ if (ctrl->ops->supports_pci_p2pdma &&
+ ctrl->ops->supports_pci_p2pdma(ctrl))
+ lim.features |= BLK_FEAT_PCI_P2PDMA;
+
+ disk = blk_mq_alloc_disk(ctrl->tagset, &lim, ns);
if (IS_ERR(disk))
goto out_free_ns;
disk->fops = &nvme_bdev_ops;
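/*
 * Sketch (illustrative, identifiers assumed): queue features are now
 * declared up front in queue_limits and handed to blk_mq_alloc_disk(),
 * replacing blk_queue_flag_set() calls on a live queue.
 */
struct queue_limits lim = { };
struct gendisk *disk;

if (needs_stable_pages)			/* assumed predicate */
	lim.features |= BLK_FEAT_STABLE_WRITES;
disk = blk_mq_alloc_disk(tagset, &lim, driver_data);
if (IS_ERR(disk))
	return PTR_ERR(disk);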
@@ -3572,15 +3830,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
ns->disk = disk;
ns->queue = disk->queue;
-
- if (ctrl->opts && ctrl->opts->data_digest)
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
-
- blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
- if (ctrl->ops->supports_pci_p2pdma &&
- ctrl->ops->supports_pci_p2pdma(ctrl))
- blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
-
ns->ctrl = ctrl;
kref_init(&ns->kref);
@@ -3613,12 +3862,21 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
if (nvme_update_ns_info(ns, info))
goto out_unlink_ns;
- down_write(&ctrl->namespaces_rwsem);
+ mutex_lock(&ctrl->namespaces_lock);
+ /*
+ * Ensure that no namespaces are added to the ctrl list after the queues
+ * are frozen, thereby avoiding a deadlock between scan and reset.
+ */
+ if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) {
+ mutex_unlock(&ctrl->namespaces_lock);
+ goto out_unlink_ns;
+ }
nvme_ns_add_to_ctrl_list(ns);
- up_write(&ctrl->namespaces_rwsem);
+ mutex_unlock(&ctrl->namespaces_lock);
+ synchronize_srcu(&ctrl->srcu);
nvme_get_ctrl(ctrl);
- if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
+ if (device_add_disk(ctrl->device, ns->disk, nvme_ns_attr_groups))
goto out_cleanup_ns_from_list;
if (!nvme_ns_head_multipath(ns->head))
@@ -3627,13 +3885,21 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
nvme_mpath_add_disk(ns, info->anagrpid);
nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
+ /*
+ * Set ns->disk->device->driver_data to ns so we can access
+ * ns->head->passthru_err_log_enabled in
+ * nvme_io_passthru_err_log_enabled_[store | show]().
+ */
+ dev_set_drvdata(disk_to_dev(ns->disk), ns);
+
return;
out_cleanup_ns_from_list:
nvme_put_ctrl(ctrl);
- down_write(&ctrl->namespaces_rwsem);
- list_del_init(&ns->list);
- up_write(&ctrl->namespaces_rwsem);
+ mutex_lock(&ctrl->namespaces_lock);
+ list_del_rcu(&ns->list);
+ mutex_unlock(&ctrl->namespaces_lock);
+ synchronize_srcu(&ctrl->srcu);
out_unlink_ns:
mutex_lock(&ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
@@ -3683,9 +3949,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
nvme_cdev_del(&ns->cdev, &ns->cdev_device);
del_gendisk(ns->disk);
- down_write(&ns->ctrl->namespaces_rwsem);
- list_del_init(&ns->list);
- up_write(&ns->ctrl->namespaces_rwsem);
+ mutex_lock(&ns->ctrl->namespaces_lock);
+ list_del_rcu(&ns->list);
+ mutex_unlock(&ns->ctrl->namespaces_lock);
+ synchronize_srcu(&ns->ctrl->srcu);
if (last_path)
nvme_mpath_shutdown_disk(ns->head);
@@ -3704,7 +3971,7 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info)
{
- int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ int ret = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) {
dev_err(ns->ctrl->device,
@@ -3720,7 +3987,7 @@ out:
*
* TODO: we should probably schedule a delayed retry here.
*/
- if (ret > 0 && (ret & NVME_SC_DNR))
+ if (ret > 0 && (ret & NVME_STATUS_DNR))
nvme_ns_remove(ns);
}
@@ -3769,22 +4036,53 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
}
}
+/**
+ * struct async_scan_info - keeps track of controller & NSIDs to scan
+ * @ctrl: Controller on which namespaces are being scanned
+ * @next_nsid: Index of next NSID to scan in ns_list
+ * @ns_list: Pointer to list of NSIDs to scan
+ *
+ * Note: There is a single async_scan_info structure shared by all instances
+ * of nvme_scan_ns_async() scanning a given controller, so the atomic
+ * operations on next_nsid are critical to ensure each instance scans a unique
+ * NSID.
+ */
+struct async_scan_info {
+ struct nvme_ctrl *ctrl;
+ atomic_t next_nsid;
+ __le32 *ns_list;
+};
+
+static void nvme_scan_ns_async(void *data, async_cookie_t cookie)
+{
+ struct async_scan_info *scan_info = data;
+ int idx;
+ u32 nsid;
+
+ idx = (u32)atomic_fetch_inc(&scan_info->next_nsid);
+ nsid = le32_to_cpu(scan_info->ns_list[idx]);
+
+ nvme_scan_ns(scan_info->ctrl, nsid);
+}
+
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
unsigned nsid)
{
struct nvme_ns *ns, *next;
LIST_HEAD(rm_list);
- down_write(&ctrl->namespaces_rwsem);
+ mutex_lock(&ctrl->namespaces_lock);
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
- if (ns->head->ns_id > nsid)
- list_move_tail(&ns->list, &rm_list);
+ if (ns->head->ns_id > nsid) {
+ list_del_rcu(&ns->list);
+ synchronize_srcu(&ctrl->srcu);
+ list_add_tail_rcu(&ns->list, &rm_list);
+ }
}
- up_write(&ctrl->namespaces_rwsem);
+ mutex_unlock(&ctrl->namespaces_lock);
list_for_each_entry_safe(ns, next, &rm_list, list)
nvme_ns_remove(ns);
-
}
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
@@ -3793,11 +4091,15 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
__le32 *ns_list;
u32 prev = 0;
int ret = 0, i;
+ ASYNC_DOMAIN(domain);
+ struct async_scan_info scan_info;
ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
if (!ns_list)
return -ENOMEM;
+ scan_info.ctrl = ctrl;
+ scan_info.ns_list = ns_list;
for (;;) {
struct nvme_command cmd = {
.identify.opcode = nvme_admin_identify,
@@ -3813,19 +4115,23 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
goto free;
}
+ atomic_set(&scan_info.next_nsid, 0);
for (i = 0; i < nr_entries; i++) {
u32 nsid = le32_to_cpu(ns_list[i]);
if (!nsid) /* end of the list? */
goto out;
- nvme_scan_ns(ctrl, nsid);
+ async_schedule_domain(nvme_scan_ns_async, &scan_info,
+ &domain);
while (++prev < nsid)
nvme_ns_remove_by_nsid(ctrl, prev);
}
+ async_synchronize_full_domain(&domain);
}
out:
nvme_remove_invalid_namespaces(ctrl, prev);
free:
+ async_synchronize_full_domain(&domain);
kfree(ns_list);
return ret;
}
@@ -3878,7 +4184,7 @@ static void nvme_scan_work(struct work_struct *work)
int ret;
	/* No tagset on a live ctrl means IO queues could not be created */
- if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset)
return;
/*
@@ -3910,7 +4216,7 @@ static void nvme_scan_work(struct work_struct *work)
* they report) but don't actually support it.
*/
ret = nvme_scan_ns_list(ctrl);
- if (ret > 0 && ret & NVME_SC_DNR)
+ if (ret > 0 && ret & NVME_STATUS_DNR)
nvme_scan_ns_sequential(ctrl);
}
mutex_unlock(&ctrl->scan_lock);
@@ -3948,15 +4254,16 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
* removing the namespaces' disks; fail all the queues now to avoid
* potentially having to clean up the failed sync later.
*/
- if (ctrl->state == NVME_CTRL_DEAD)
+ if (nvme_ctrl_state(ctrl) == NVME_CTRL_DEAD)
nvme_mark_namespaces_dead(ctrl);
/* this is a no-op when called from the controller reset handler */
nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
- down_write(&ctrl->namespaces_rwsem);
- list_splice_init(&ctrl->namespaces, &ns_list);
- up_write(&ctrl->namespaces_rwsem);
+ mutex_lock(&ctrl->namespaces_lock);
+ list_splice_init_rcu(&ctrl->namespaces, &ns_list, synchronize_rcu);
+ mutex_unlock(&ctrl->namespaces_lock);
+ synchronize_srcu(&ctrl->srcu);
list_for_each_entry_safe(ns, next, &ns_list, list)
nvme_ns_remove(ns);
@@ -4030,7 +4337,7 @@ static void nvme_async_event_work(struct work_struct *work)
* flushing ctrl async_event_work after changing the controller state
* from LIVE and before freeing the admin queue.
*/
- if (ctrl->state == NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
ctrl->ops->submit_async_event(ctrl);
}
@@ -4051,14 +4358,30 @@ static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
struct nvme_fw_slot_info_log *log;
+ u8 next_fw_slot, cur_fw_slot;
log = kmalloc(sizeof(*log), GFP_KERNEL);
if (!log)
return;
if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
- log, sizeof(*log), 0))
+ log, sizeof(*log), 0)) {
dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
+ goto out_free_log;
+ }
+
+ cur_fw_slot = log->afi & 0x7;
+ next_fw_slot = (log->afi & 0x70) >> 4;
+ if (!cur_fw_slot || (next_fw_slot && (cur_fw_slot != next_fw_slot))) {
+ dev_info(ctrl->device,
+			 "Firmware will be activated after the next Controller Level Reset\n");
+ goto out_free_log;
+ }
+
+ memcpy(ctrl->subsys->firmware_rev, &log->frs[cur_fw_slot - 1],
+ sizeof(ctrl->subsys->firmware_rev));
+
+out_free_log:
kfree(log);
}
@@ -4068,6 +4391,8 @@ static void nvme_fw_act_work(struct work_struct *work)
struct nvme_ctrl, fw_act_work);
unsigned long fw_act_timeout;
+ nvme_auth_stop(ctrl);
+
if (ctrl->mtfa)
fw_act_timeout = jiffies +
msecs_to_jiffies(ctrl->mtfa * 100);
@@ -4123,7 +4448,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
* firmware activation.
*/
if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
- nvme_auth_stop(ctrl);
requeue = false;
queue_work(nvme_wq, &ctrl->fw_act_work);
}
@@ -4146,7 +4470,8 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
{
- dev_warn(ctrl->device, "resetting controller due to AER\n");
+ dev_warn(ctrl->device,
+ "resetting controller due to persistent internal error\n");
nvme_reset_ctrl(ctrl);
}
@@ -4193,13 +4518,15 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
const struct blk_mq_ops *ops, unsigned int cmd_size)
{
+ struct queue_limits lim = {};
int ret;
memset(set, 0, sizeof(*set));
set->ops = ops;
set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
if (ctrl->ops->flags & NVME_F_FABRICS)
- set->reserved_tags = NVMF_RESERVED_TAGS;
+ /* Reserved for fabric connect and keep alive */
+ set->reserved_tags = 2;
set->numa_node = ctrl->numa_node;
set->flags = BLK_MQ_F_NO_SCHED;
if (ctrl->ops->flags & NVME_F_BLOCKING)
@@ -4212,14 +4539,14 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
if (ret)
return ret;
- ctrl->admin_q = blk_mq_init_queue(set);
+ ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL);
if (IS_ERR(ctrl->admin_q)) {
ret = PTR_ERR(ctrl->admin_q);
goto out_free_tagset;
}
if (ctrl->ops->flags & NVME_F_FABRICS) {
- ctrl->fabrics_q = blk_mq_init_queue(set);
+ ctrl->fabrics_q = blk_mq_alloc_queue(set, NULL, NULL);
if (IS_ERR(ctrl->fabrics_q)) {
ret = PTR_ERR(ctrl->fabrics_q);
goto out_cleanup_admin_q;
@@ -4268,12 +4595,13 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS)
set->reserved_tags = NVME_AQ_DEPTH;
else if (ctrl->ops->flags & NVME_F_FABRICS)
- set->reserved_tags = NVMF_RESERVED_TAGS;
+ /* Reserved for fabric connect */
+ set->reserved_tags = 1;
set->numa_node = ctrl->numa_node;
set->flags = BLK_MQ_F_SHOULD_MERGE;
if (ctrl->ops->flags & NVME_F_BLOCKING)
set->flags |= BLK_MQ_F_BLOCKING;
- set->cmd_size = cmd_size,
+ set->cmd_size = cmd_size;
set->driver_data = ctrl;
set->nr_hw_queues = ctrl->queue_count - 1;
set->timeout = NVME_IO_TIMEOUT;
@@ -4283,13 +4611,15 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
return ret;
if (ctrl->ops->flags & NVME_F_FABRICS) {
- ctrl->connect_q = blk_mq_init_queue(set);
+ struct queue_limits lim = {
+ .features = BLK_FEAT_SKIP_TAGSET_QUIESCE,
+ };
+
+ ctrl->connect_q = blk_mq_alloc_queue(set, &lim, NULL);
if (IS_ERR(ctrl->connect_q)) {
ret = PTR_ERR(ctrl->connect_q);
goto out_free_tag_set;
}
- blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE,
- ctrl->connect_q);
}
ctrl->tagset = set;
@@ -4316,7 +4646,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
nvme_mpath_stop(ctrl);
nvme_auth_stop(ctrl);
- nvme_stop_keep_alive(ctrl);
nvme_stop_failfast_work(ctrl);
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fw_act_work);
@@ -4327,8 +4656,6 @@ EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
- nvme_start_keep_alive(ctrl);
-
nvme_enable_aen(ctrl);
/*
@@ -4354,6 +4681,7 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
+ nvme_stop_keep_alive(ctrl);
nvme_hwmon_exit(ctrl);
nvme_fault_inject_fini(&ctrl->fault_inject);
dev_pm_qos_hide_latency_tolerance(ctrl->device);
@@ -4383,9 +4711,9 @@ static void nvme_free_ctrl(struct device *dev)
if (!subsys || ctrl->instance != subsys->instance)
ida_free(&nvme_instance_ida, ctrl->instance);
-
nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl);
+ cleanup_srcu_struct(&ctrl->srcu);
nvme_auth_stop(ctrl);
nvme_auth_free(ctrl);
__free_page(ctrl->discard_page);
@@ -4408,19 +4736,28 @@ static void nvme_free_ctrl(struct device *dev)
 * Initialize an NVMe controller structure. This needs to be called during
 * earliest initialization so that we have the initialized structures around
 * during probing.
+ *
+ * On success, the caller must use nvme_put_ctrl() to release the reference
+ * when it is no longer needed; this also invokes the ops->free_ctrl()
+ * callback.
*/
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
int ret;
- ctrl->state = NVME_CTRL_NEW;
+ WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
+ ctrl->passthru_err_log_enabled = false;
clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
spin_lock_init(&ctrl->lock);
+ mutex_init(&ctrl->namespaces_lock);
+
+ ret = init_srcu_struct(&ctrl->srcu);
+ if (ret)
+ return ret;
+
mutex_init(&ctrl->scan_lock);
INIT_LIST_HEAD(&ctrl->namespaces);
xa_init(&ctrl->cels);
- init_rwsem(&ctrl->namespaces_rwsem);
ctrl->dev = dev;
ctrl->ops = ops;
ctrl->quirks = quirks;
@@ -4435,6 +4772,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
+ ctrl->ka_last_check_time = jiffies;
BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
PAGE_SIZE);
@@ -4449,11 +4787,17 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
goto out;
ctrl->instance = ret;
+ ret = nvme_auth_init_ctrl(ctrl);
+ if (ret)
+ goto out_release_instance;
+
+ nvme_mpath_init_ctrl(ctrl);
+
device_initialize(&ctrl->ctrl_device);
ctrl->device = &ctrl->ctrl_device;
ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
ctrl->instance);
- ctrl->device->class = nvme_class;
+ ctrl->device->class = &nvme_class;
ctrl->device->parent = ctrl->dev;
if (ops->dev_attr_groups)
ctrl->device->groups = ops->dev_attr_groups;
@@ -4461,16 +4805,36 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
ctrl->device->groups = nvme_dev_attr_groups;
ctrl->device->release = nvme_free_ctrl;
dev_set_drvdata(ctrl->device, ctrl);
+
+ return ret;
+
+out_release_instance:
+ ida_free(&nvme_instance_ida, ctrl->instance);
+out:
+ if (ctrl->discard_page)
+ __free_page(ctrl->discard_page);
+ cleanup_srcu_struct(&ctrl->srcu);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_init_ctrl);
+
+/*
+ * On success, returns with an elevated controller reference, and the caller
+ * must use nvme_uninit_ctrl() to properly free the resources associated with
+ * the ctrl.
+ */
+int nvme_add_ctrl(struct nvme_ctrl *ctrl)
+{
+ int ret;
+
ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
if (ret)
- goto out_release_instance;
+ return ret;
- nvme_get_ctrl(ctrl);
cdev_init(&ctrl->cdev, &nvme_dev_fops);
- ctrl->cdev.owner = ops->module;
+ ctrl->cdev.owner = ctrl->ops->module;
ret = cdev_device_add(&ctrl->cdev, ctrl->device);
if (ret)
- goto out_free_name;
+ return ret;
/*
* Initialize latency tolerance controls. The sysfs files won't
@@ -4481,62 +4845,50 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
min(default_ps_max_latency_us, (unsigned long)S32_MAX));
nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
- nvme_mpath_init_ctrl(ctrl);
- ret = nvme_auth_init_ctrl(ctrl);
- if (ret)
- goto out_free_cdev;
+ nvme_get_ctrl(ctrl);
return 0;
-out_free_cdev:
- nvme_fault_inject_fini(&ctrl->fault_inject);
- dev_pm_qos_hide_latency_tolerance(ctrl->device);
- cdev_device_del(&ctrl->cdev, ctrl->device);
-out_free_name:
- nvme_put_ctrl(ctrl);
- kfree_const(ctrl->device->kobj.name);
-out_release_instance:
- ida_free(&nvme_instance_ida, ctrl->instance);
-out:
- if (ctrl->discard_page)
- __free_page(ctrl->discard_page);
- return ret;
}
-EXPORT_SYMBOL_GPL(nvme_init_ctrl);
+EXPORT_SYMBOL_GPL(nvme_add_ctrl);
/* let I/O to all namespaces fail in preparation for surprise removal */
void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_mark_disk_dead(ns->disk);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead);
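/*
 * Sketch (simplified; field names follow the patch): the SRCU scheme
 * that replaces namespaces_rwsem.  Readers may sleep inside the
 * critical section; writers serialize on namespaces_lock and wait for
 * readers to drain before an entry may be freed.
 */
static void example_reader(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&ctrl->srcu);
	list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
		;	/* may block here, unlike plain RCU */
	srcu_read_unlock(&ctrl->srcu, srcu_idx);
}

static void example_writer(struct nvme_ctrl *ctrl, struct nvme_ns *ns)
{
	mutex_lock(&ctrl->namespaces_lock);
	list_del_rcu(&ns->list);
	mutex_unlock(&ctrl->namespaces_lock);
	synchronize_srcu(&ctrl->srcu);	/* readers done; safe to free ns */
}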
void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_mq_unfreeze_queue(ns->queue);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
+ clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
if (timeout <= 0)
break;
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
return timeout;
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
@@ -4544,22 +4896,25 @@ EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_mq_freeze_queue_wait(ns->queue);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);
void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ set_bit(NVME_CTRL_FROZEN, &ctrl->flags);
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_freeze_queue_start(ns->queue);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);
@@ -4602,11 +4957,12 @@ EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_sync_queue(ns->queue);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
@@ -4684,29 +5040,22 @@ static int __init nvme_core_init(void)
if (result < 0)
goto destroy_delete_wq;
- nvme_class = class_create("nvme");
- if (IS_ERR(nvme_class)) {
- result = PTR_ERR(nvme_class);
+ result = class_register(&nvme_class);
+ if (result)
goto unregister_chrdev;
- }
- nvme_class->dev_uevent = nvme_class_uevent;
- nvme_subsys_class = class_create("nvme-subsystem");
- if (IS_ERR(nvme_subsys_class)) {
- result = PTR_ERR(nvme_subsys_class);
+ result = class_register(&nvme_subsys_class);
+ if (result)
goto destroy_class;
- }
result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS,
"nvme-generic");
if (result < 0)
goto destroy_subsys_class;
- nvme_ns_chr_class = class_create("nvme-generic");
- if (IS_ERR(nvme_ns_chr_class)) {
- result = PTR_ERR(nvme_ns_chr_class);
+ result = class_register(&nvme_ns_chr_class);
+ if (result)
goto unregister_generic_ns;
- }
result = nvme_init_auth();
if (result)
@@ -4714,13 +5063,13 @@ static int __init nvme_core_init(void)
return 0;
destroy_ns_chr:
- class_destroy(nvme_ns_chr_class);
+ class_unregister(&nvme_ns_chr_class);
unregister_generic_ns:
unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
destroy_subsys_class:
- class_destroy(nvme_subsys_class);
+ class_unregister(&nvme_subsys_class);
destroy_class:
- class_destroy(nvme_class);
+ class_unregister(&nvme_class);
unregister_chrdev:
unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_delete_wq:
@@ -4736,9 +5085,9 @@ out:
static void __exit nvme_core_exit(void)
{
nvme_exit_auth();
- class_destroy(nvme_ns_chr_class);
- class_destroy(nvme_subsys_class);
- class_destroy(nvme_class);
+ class_unregister(&nvme_ns_chr_class);
+ class_unregister(&nvme_subsys_class);
+ class_unregister(&nvme_class);
unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_workqueue(nvme_delete_wq);
@@ -4750,5 +5099,6 @@ static void __exit nvme_core_exit(void)
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
+MODULE_DESCRIPTION("NVMe host core framework");
module_init(nvme_core_init);
module_exit(nvme_core_exit);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 8175d49f2909..432efcbf9e2f 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -12,6 +12,7 @@
#include <linux/seq_file.h>
#include "nvme.h"
#include "fabrics.h"
+#include <linux/nvme-keyring.h>
static LIST_HEAD(nvmf_transports);
static DECLARE_RWSEM(nvmf_transports_rwsem);
@@ -179,14 +180,14 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, NVME_SUBMIT_RESERVED);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
if (unlikely(ret != 0))
dev_err(ctrl->device,
"Property Get error: %d, offset %#x\n",
- ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+ ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off);
return ret;
}
@@ -225,14 +226,14 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, NVME_SUBMIT_RESERVED);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
if (unlikely(ret != 0))
dev_err(ctrl->device,
"Property Get error: %d, offset %#x\n",
- ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+ ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off);
return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read64);
@@ -270,15 +271,30 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.value = cpu_to_le64(val);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, NVME_SUBMIT_RESERVED);
if (unlikely(ret))
dev_err(ctrl->device,
"Property Set error: %d, offset %#x\n",
- ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+ ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off);
return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_write32);
+int nvmf_subsystem_reset(struct nvme_ctrl *ctrl)
+{
+ int ret;
+
+ if (!nvme_wait_reset(ctrl))
+ return -EBUSY;
+
+ ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, NVME_SUBSYS_RESET);
+ if (ret)
+ return ret;
+
+ return nvme_try_sched_reset(ctrl);
+}
+EXPORT_SYMBOL_GPL(nvmf_subsystem_reset);
+
/**
* nvmf_log_connect_error() - Error-parsing-diagnostic print out function for
* connect() errors.
@@ -294,7 +310,7 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
int errval, int offset, struct nvme_command *cmd,
struct nvmf_connect_data *data)
{
- int err_sctype = errval & ~NVME_SC_DNR;
+ int err_sctype = errval & ~NVME_STATUS_DNR;
if (errval < 0) {
dev_err(ctrl->device,
@@ -386,8 +402,8 @@ static struct nvmf_connect_data *nvmf_connect_data_prep(struct nvme_ctrl *ctrl,
uuid_copy(&data->hostid, &ctrl->opts->host->id);
data->cntlid = cpu_to_le16(cntlid);
- strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
- strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
+ strscpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
+ strscpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
return data;
}
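/*
 * Note on the strscpy() conversion above (illustrative): unlike
 * strncpy(), strscpy() always NUL-terminates the destination and
 * returns -E2BIG when the source had to be truncated, so callers that
 * care can detect overlong input:
 *
 *	char nqn[NVMF_NQN_SIZE];
 *
 *	if (strscpy(nqn, src, sizeof(nqn)) < 0)
 *		pr_warn("NQN truncated\n");
 */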
@@ -427,12 +443,6 @@ static void nvmf_connect_cmd_prep(struct nvme_ctrl *ctrl, u16 qid,
* fabrics-protocol connection of the NVMe Admin queue between the
* host system device and the allocated NVMe controller on the
* target system via a NVMe Fabrics "Connect" command.
- *
- * Return:
- * 0: success
- * > 0: NVMe error status code
- * < 0: Linux errno error code
- *
*/
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
{
@@ -449,8 +459,10 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
return -ENOMEM;
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
- data, sizeof(*data), NVME_QID_ANY, 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ data, sizeof(*data), NVME_QID_ANY,
+ NVME_SUBMIT_AT_HEAD |
+ NVME_SUBMIT_NOWAIT |
+ NVME_SUBMIT_RESERVED);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
@@ -464,7 +476,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
if (result & NVME_CONNECT_AUTHREQ_ASCR) {
dev_warn(ctrl->device,
"qid 0: secure concatenation is not supported\n");
- ret = NVME_SC_AUTH_REQUIRED;
+ ret = -EOPNOTSUPP;
goto out_free_data;
}
/* Authentication required */
@@ -472,14 +484,14 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
if (ret) {
dev_warn(ctrl->device,
"qid 0: authentication setup failed\n");
- ret = NVME_SC_AUTH_REQUIRED;
goto out_free_data;
}
ret = nvme_auth_wait(ctrl, 0);
- if (ret)
+ if (ret) {
dev_warn(ctrl->device,
- "qid 0: authentication failed\n");
- else
+ "qid 0: authentication failed, error %d\n",
+ ret);
+ } else
dev_info(ctrl->device,
"qid 0: authenticated\n");
}
@@ -524,11 +536,14 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
return -ENOMEM;
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
- data, sizeof(*data), qid, 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ data, sizeof(*data), qid,
+ NVME_SUBMIT_AT_HEAD |
+ NVME_SUBMIT_RESERVED |
+ NVME_SUBMIT_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
+ goto out_free_data;
}
result = le32_to_cpu(res.u32);
if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
@@ -536,7 +551,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
if (result & NVME_CONNECT_AUTHREQ_ASCR) {
dev_warn(ctrl->device,
"qid 0: secure concatenation is not supported\n");
- ret = NVME_SC_AUTH_REQUIRED;
+ ret = -EOPNOTSUPP;
goto out_free_data;
}
/* Authentication required */
@@ -544,12 +559,13 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
if (ret) {
dev_warn(ctrl->device,
"qid %d: authentication setup failed\n", qid);
- ret = NVME_SC_AUTH_REQUIRED;
- } else {
- ret = nvme_auth_wait(ctrl, qid);
- if (ret)
- dev_warn(ctrl->device,
- "qid %u: authentication failed\n", qid);
+ goto out_free_data;
+ }
+ ret = nvme_auth_wait(ctrl, qid);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %u: authentication failed, error %d\n",
+ qid, ret);
}
}
out_free_data:
@@ -558,8 +574,26 @@ out_free_data:
}
EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
-bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
+/*
+ * Evaluate the status information returned by the transport in order to decide
+ * whether a reconnect attempt should be scheduled.
+ *
+ * Do not retry when:
+ *
+ * - the DNR bit is set and the specification states no further connect
+ *   attempts with the same set of parameters should be attempted.
+ *
+ * - the authentication attempt failed because the key was invalid.
+ *   This error code is set on the host side.
+ */
+bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status)
{
+ if (status > 0 && (status & NVME_STATUS_DNR))
+ return false;
+
+ if (status == -EKEYREJECTED)
+ return false;
+
if (ctrl->opts->max_reconnects == -1 ||
ctrl->nr_reconnects < ctrl->opts->max_reconnects)
return true;
@@ -622,6 +656,23 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
return NULL;
}
+static struct key *nvmf_parse_key(int key_id)
+{
+ struct key *key;
+
+ if (!IS_ENABLED(CONFIG_NVME_TCP_TLS)) {
+ pr_err("TLS is not supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ key = nvme_tls_key_lookup(key_id);
+ if (IS_ERR(key))
+ pr_err("key id %08x not found\n", key_id);
+ else
+ pr_debug("Using key id %08x\n", key_id);
+ return key;
+}
+
static const match_table_t opt_tokens = {
{ NVMF_OPT_TRANSPORT, "transport=%s" },
{ NVMF_OPT_TRADDR, "traddr=%s" },
@@ -643,10 +694,19 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_NR_WRITE_QUEUES, "nr_write_queues=%d" },
{ NVMF_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" },
{ NVMF_OPT_TOS, "tos=%d" },
+#ifdef CONFIG_NVME_TCP_TLS
+ { NVMF_OPT_KEYRING, "keyring=%d" },
+ { NVMF_OPT_TLS_KEY, "tls_key=%d" },
+#endif
{ NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
{ NVMF_OPT_DISCOVERY, "discovery" },
+#ifdef CONFIG_NVME_HOST_AUTH
{ NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" },
{ NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" },
+#endif
+#ifdef CONFIG_NVME_TCP_TLS
+ { NVMF_OPT_TLS, "tls" },
+#endif
{ NVMF_OPT_ERR, NULL }
};
@@ -657,9 +717,10 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
char *options, *o, *p;
int token, ret = 0;
size_t nqnlen = 0;
- int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO;
+ int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO, key_id;
uuid_t hostid;
char hostnqn[NVMF_NQN_SIZE];
+ struct key *key;
/* Set defaults */
opts->queue_size = NVMF_DEF_QUEUE_SIZE;
@@ -671,6 +732,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->hdr_digest = false;
opts->data_digest = false;
opts->tos = -1; /* < 0 == use transport default */
+ opts->tls = false;
+ opts->tls_key = NULL;
+ opts->keyring = NULL;
options = o = kstrdup(buf, GFP_KERNEL);
if (!options)
@@ -924,6 +988,32 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
}
opts->tos = token;
break;
+ case NVMF_OPT_KEYRING:
+ if (match_int(args, &key_id) || key_id <= 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ key = nvmf_parse_key(key_id);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ goto out;
+ }
+ key_put(opts->keyring);
+ opts->keyring = key;
+ break;
+ case NVMF_OPT_TLS_KEY:
+ if (match_int(args, &key_id) || key_id <= 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ key = nvmf_parse_key(key_id);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ goto out;
+ }
+ key_put(opts->tls_key);
+ opts->tls_key = key;
+ break;
case NVMF_OPT_DISCOVERY:
opts->discovery_nqn = true;
break;
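Both key options above use the same lookup-then-swap idiom, so a repeated
"keyring=" or "tls_key=" on the command line replaces the earlier key instead
of leaking its reference. Condensed to a sketch (demo_set_key is illustrative
only):

static void demo_set_key(struct key **slot, struct key *new)
{
	key_put(*slot);		/* key_put(NULL) is a no-op */
	*slot = new;
}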
@@ -955,6 +1045,14 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
kfree(opts->dhchap_ctrl_secret);
opts->dhchap_ctrl_secret = p;
break;
+ case NVMF_OPT_TLS:
+ if (!IS_ENABLED(CONFIG_NVME_TCP_TLS)) {
+ pr_err("TLS is not supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->tls = true;
+ break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
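With all three options parsed, a TLS-enabled TCP connect string written to
/dev/nvme-fabrics might look like the following (all values illustrative; the
key serial is the decimal ID of a key already loaded into the kernel keyring):

	transport=tcp,traddr=192.168.1.10,trsvcid=4420,nqn=nqn.2024-01.example:subsys1,tls,tls_key=123456789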
@@ -1156,6 +1254,8 @@ static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
void nvmf_free_options(struct nvmf_ctrl_options *opts)
{
nvmf_host_put(opts->host);
+ key_put(opts->keyring);
+ key_put(opts->tls_key);
kfree(opts->transport);
kfree(opts->traddr);
kfree(opts->trsvcid);
@@ -1247,7 +1347,10 @@ out_free_opts:
return ERR_PTR(ret);
}
-static struct class *nvmf_class;
+static const struct class nvmf_class = {
+ .name = "nvme-fabrics",
+};
+
static struct device *nvmf_device;
static DEFINE_MUTEX(nvmf_dev_mutex);
@@ -1300,10 +1403,10 @@ static void __nvmf_concat_opt_tokens(struct seq_file *seq_file)
tok = &opt_tokens[idx];
if (tok->token == NVMF_OPT_ERR)
continue;
- seq_puts(seq_file, ",");
+ seq_putc(seq_file, ',');
seq_puts(seq_file, tok->pattern);
}
- seq_puts(seq_file, "\n");
+ seq_putc(seq_file, '\n');
}
static int nvmf_dev_show(struct seq_file *seq_file, void *private)
@@ -1367,15 +1470,14 @@ static int __init nvmf_init(void)
if (!nvmf_default_host)
return -ENOMEM;
- nvmf_class = class_create("nvme-fabrics");
- if (IS_ERR(nvmf_class)) {
+ ret = class_register(&nvmf_class);
+ if (ret) {
pr_err("couldn't register class nvme-fabrics\n");
- ret = PTR_ERR(nvmf_class);
goto out_free_host;
}
nvmf_device =
- device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
+ device_create(&nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
if (IS_ERR(nvmf_device)) {
pr_err("couldn't create nvme-fabrics device!\n");
ret = PTR_ERR(nvmf_device);
@@ -1391,9 +1493,9 @@ static int __init nvmf_init(void)
return 0;
out_destroy_device:
- device_destroy(nvmf_class, MKDEV(0, 0));
+ device_destroy(&nvmf_class, MKDEV(0, 0));
out_destroy_class:
- class_destroy(nvmf_class);
+ class_unregister(&nvmf_class);
out_free_host:
nvmf_host_put(nvmf_default_host);
return ret;
@@ -1402,8 +1504,8 @@ out_free_host:
static void __exit nvmf_exit(void)
{
misc_deregister(&nvmf_misc);
- device_destroy(nvmf_class, MKDEV(0, 0));
- class_destroy(nvmf_class);
+ device_destroy(&nvmf_class, MKDEV(0, 0));
+ class_unregister(&nvmf_class);
nvmf_host_put(nvmf_default_host);
BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64);
@@ -1421,6 +1523,7 @@ static void __exit nvmf_exit(void)
}
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("NVMe host fabrics library");
module_init(nvmf_init);
module_exit(nvmf_exit);
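The class_create()/class_destroy() to class_register()/class_unregister()
conversion above follows the driver-core move to statically allocated, const
struct class objects. Reduced to a minimal sketch (demo_* names are
illustrative):

static const struct class demo_class = {
	.name = "demo",
};

static int __init demo_init(void)
{
	int ret = class_register(&demo_class);

	if (ret)
		return ret;
	/* device_create()/device_destroy() now take &demo_class */
	return 0;
}

static void __exit demo_exit(void)
{
	class_unregister(&demo_class);
}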
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 82e7a27ffbde..21d75dc4a3a0 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -19,13 +19,6 @@
#define NVMF_DEF_FAIL_FAST_TMO -1
/*
- * Reserved one command for internal usage. This command is used for sending
- * the connect command, as well as for the keep alive command on the admin
- * queue once live.
- */
-#define NVMF_RESERVED_TAGS 1
-
-/*
* Define a host as seen by the target. We allocate one at boot, but also
* allow overriding it when creating controllers. This is both to provide
* persistence of the Host NQN over multiple boots, and to allow using
@@ -70,6 +63,9 @@ enum {
NVMF_OPT_DISCOVERY = 1 << 22,
NVMF_OPT_DHCHAP_SECRET = 1 << 23,
NVMF_OPT_DHCHAP_CTRL_SECRET = 1 << 24,
+ NVMF_OPT_TLS = 1 << 25,
+ NVMF_OPT_KEYRING = 1 << 26,
+ NVMF_OPT_TLS_KEY = 1 << 27,
};
/**
@@ -102,6 +98,9 @@ enum {
* @dhchap_secret: DH-HMAC-CHAP secret
* @dhchap_ctrl_secret: DH-HMAC-CHAP controller secret for bi-directional
* authentication
+ * @keyring: Keyring to use for key lookups
+ * @tls_key: TLS key for encrypted connections (TCP)
+ * @tls: Start TLS encrypted connections (TCP)
* @disable_sqflow: disable controller sq flow control
* @hdr_digest: generate/verify header digest (TCP)
* @data_digest: generate/verify data digest (TCP)
@@ -128,6 +127,9 @@ struct nvmf_ctrl_options {
struct nvmf_host *host;
char *dhchap_secret;
char *dhchap_ctrl_secret;
+ struct key *keyring;
+ struct key *tls_key;
+ bool tls;
bool disable_sqflow;
bool hdr_digest;
bool data_digest;
@@ -176,9 +178,11 @@ static inline bool
nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
struct nvmf_ctrl_options *opts)
{
- if (ctrl->state == NVME_CTRL_DELETING ||
- ctrl->state == NVME_CTRL_DELETING_NOIO ||
- ctrl->state == NVME_CTRL_DEAD ||
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ if (state == NVME_CTRL_DELETING ||
+ state == NVME_CTRL_DELETING_NOIO ||
+ state == NVME_CTRL_DEAD ||
strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
!uuid_equal(&opts->host->id, &ctrl->opts->host->id))
@@ -213,13 +217,14 @@ static inline unsigned int nvmf_nr_io_queues(struct nvmf_ctrl_options *opts)
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
+int nvmf_subsystem_reset(struct nvme_ctrl *ctrl);
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid);
int nvmf_register_transport(struct nvmf_transport_ops *ops);
void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
void nvmf_free_options(struct nvmf_ctrl_options *opts);
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
-bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
+bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status);
bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
struct nvmf_ctrl_options *opts);
void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues,
diff --git a/drivers/nvme/host/fault_inject.c b/drivers/nvme/host/fault_inject.c
index 1ba10a5c656d..105d6cb41c72 100644
--- a/drivers/nvme/host/fault_inject.c
+++ b/drivers/nvme/host/fault_inject.c
@@ -6,6 +6,7 @@
*/
#include <linux/moduleparam.h>
+#include <linux/debugfs.h>
#include "nvme.h"
static DECLARE_FAULT_ATTR(fail_default_attr);
@@ -75,7 +76,7 @@ void nvme_should_fail(struct request *req)
/* inject status code and DNR bit */
status = fault_inject->status;
if (fault_inject->dont_retry)
- status |= NVME_SC_DNR;
+ status |= NVME_STATUS_DNR;
nvme_req(req)->status = status;
}
}
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 1cd2bf82319a..b81af7919e94 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -221,11 +221,6 @@ static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);
-static struct workqueue_struct *nvme_fc_wq;
-
-static bool nvme_fc_waiting_to_unload;
-static DECLARE_COMPLETION(nvme_fc_unload_proceed);
-
/*
* These items are short-term. They will eventually be moved into
* a generic FC class. See comments in module init.
@@ -255,8 +250,6 @@ nvme_fc_free_lport(struct kref *ref)
/* remove from transport list */
spin_lock_irqsave(&nvme_fc_lock, flags);
list_del(&lport->port_list);
- if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
- complete(&nvme_fc_unload_proceed);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
@@ -557,7 +550,7 @@ nvme_fc_rport_get(struct nvme_fc_rport *rport)
static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
- switch (ctrl->ctrl.state) {
+ switch (nvme_ctrl_state(&ctrl->ctrl)) {
case NVME_CTRL_NEW:
case NVME_CTRL_CONNECTING:
/*
@@ -793,7 +786,7 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
"NVME-FC{%d}: controller connectivity lost. Awaiting "
"Reconnect", ctrl->cnum);
- switch (ctrl->ctrl.state) {
+ switch (nvme_ctrl_state(&ctrl->ctrl)) {
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
/*
@@ -1218,10 +1211,10 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
/* Linux supports only Dynamic controllers */
assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
- strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
- min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
- strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
- min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
+ strscpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
+ sizeof(assoc_rqst->assoc_cmd.hostnqn));
+ strscpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
+ sizeof(assoc_rqst->assoc_cmd.subnqn));
lsop->queue = queue;
lsreq->rqstaddr = assoc_rqst;
@@ -1924,7 +1917,7 @@ char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req)
struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
struct request *rq = op->rq;
- if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq->bio)
+ if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq || !rq->bio)
return NULL;
return blkcg_get_fc_appid(rq->bio);
}
@@ -2435,7 +2428,7 @@ nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
* controller. Called after last nvme_put_ctrl() call
*/
static void
-nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
+nvme_fc_free_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
@@ -2548,24 +2541,17 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
* the controller. Abort any ios on the association and let the
* create_association error path resolve things.
*/
- enum nvme_ctrl_state state;
- unsigned long flags;
-
- spin_lock_irqsave(&ctrl->lock, flags);
- state = ctrl->ctrl.state;
- if (state == NVME_CTRL_CONNECTING) {
- set_bit(ASSOC_FAILED, &ctrl->flags);
- spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
__nvme_fc_abort_outstanding_ios(ctrl, true);
+ set_bit(ASSOC_FAILED, &ctrl->flags);
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: transport error during (re)connect\n",
ctrl->cnum);
return;
}
- spin_unlock_irqrestore(&ctrl->lock, flags);
/* Otherwise, only proceed if in LIVE state - e.g. on first error */
- if (state != NVME_CTRL_LIVE)
+ if (ctrl->ctrl.state != NVME_CTRL_LIVE)
return;
dev_warn(ctrl->ctrl.device,
@@ -2581,6 +2567,7 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
{
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
struct nvme_fc_ctrl *ctrl = op->ctrl;
+ u16 qnum = op->queue->qnum;
struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
struct nvme_command *sqe = &cmdiu->sqe;
@@ -2589,10 +2576,11 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
* will detect the aborted io and will fail the connection.
*/
dev_info(ctrl->ctrl.device,
- "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
+ "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d (%s) w10/11: "
"x%08x/x%08x\n",
- ctrl->cnum, op->queue->qnum, sqe->common.opcode,
- sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
+ ctrl->cnum, qnum, sqe->common.opcode, sqe->fabrics.fctype,
+ nvme_fabrics_opcode_str(qnum, sqe),
+ sqe->common.cdw10, sqe->common.cdw11);
if (__nvme_fc_abort_op(ctrl, op))
nvme_fc_error_recovery(ctrl, "io timeout abort failed");
@@ -3132,27 +3120,28 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
nvme_unquiesce_admin_queue(&ctrl->ctrl);
ret = nvme_init_ctrl_finish(&ctrl->ctrl, false);
- if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
- ret = -EIO;
if (ret)
goto out_disconnect_admin_queue;
-
+ if (test_bit(ASSOC_FAILED, &ctrl->flags)) {
+ ret = -EIO;
+ goto out_stop_keep_alive;
+ }
/* sanity checks */
/* FC-NVME does not have other data in the capsule */
if (ctrl->ctrl.icdoff) {
dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
ctrl->ctrl.icdoff);
- ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- goto out_disconnect_admin_queue;
+ ret = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ goto out_stop_keep_alive;
}
/* FC-NVME supports normal SGL Data Block Descriptors */
if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) {
dev_err(ctrl->ctrl.device,
"Mandatory sgls are not supported!\n");
- ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- goto out_disconnect_admin_queue;
+ ret = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ goto out_stop_keep_alive;
}
if (opts->queue_size > ctrl->ctrl.maxcmd) {
@@ -3179,16 +3168,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
else
ret = nvme_fc_recreate_io_queues(ctrl);
}
-
- spin_lock_irqsave(&ctrl->lock, flags);
if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
ret = -EIO;
- if (ret) {
- spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (ret)
goto out_term_aen_ops;
- }
+
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
- spin_unlock_irqrestore(&ctrl->lock, flags);
ctrl->ctrl.nr_reconnects = 0;
@@ -3199,6 +3184,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
out_term_aen_ops:
nvme_fc_term_aen_ops(ctrl);
+out_stop_keep_alive:
+ nvme_stop_keep_alive(&ctrl->ctrl);
out_disconnect_admin_queue:
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: create_assoc failed, assoc_id %llx ret %d\n",
@@ -3316,19 +3303,17 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
bool recon = true;
- if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
+ if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_CONNECTING)
return;
if (portptr->port_state == FC_OBJSTATE_ONLINE) {
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
ctrl->cnum, status);
- if (status > 0 && (status & NVME_SC_DNR))
- recon = false;
} else if (time_after_eq(jiffies, rport->dev_loss_end))
recon = false;
- if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
+ if (recon && nvmf_should_reconnect(&ctrl->ctrl, status)) {
if (portptr->port_state == FC_OBJSTATE_ONLINE)
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: Reconnect attempt in %ld "
@@ -3340,7 +3325,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
} else {
if (portptr->port_state == FC_OBJSTATE_ONLINE) {
- if (status > 0 && (status & NVME_SC_DNR))
+ if (status > 0 && (status & NVME_STATUS_DNR))
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: reconnect failure\n",
ctrl->cnum);
@@ -3397,7 +3382,8 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
- .free_ctrl = nvme_fc_nvme_ctrl_freed,
+ .subsystem_reset = nvmf_subsystem_reset,
+ .free_ctrl = nvme_fc_free_ctrl,
.submit_async_event = nvme_fc_submit_async_event,
.delete_ctrl = nvme_fc_delete_ctrl,
.get_address = nvmf_get_address,
@@ -3459,12 +3445,11 @@ nvme_fc_existing_controller(struct nvme_fc_rport *rport,
return found;
}
-static struct nvme_ctrl *
-nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+static struct nvme_fc_ctrl *
+nvme_fc_alloc_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
{
struct nvme_fc_ctrl *ctrl;
- unsigned long flags;
int ret, idx, ctrl_loss_tmo;
if (!(rport->remoteport.port_role &
@@ -3506,10 +3491,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->ctrl.opts = opts;
ctrl->ctrl.nr_reconnects = 0;
- if (lport->dev)
- ctrl->ctrl.numa_node = dev_to_node(lport->dev);
- else
- ctrl->ctrl.numa_node = NUMA_NO_NODE;
INIT_LIST_HEAD(&ctrl->ctrl_list);
ctrl->lport = lport;
ctrl->rport = rport;
@@ -3554,8 +3535,38 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
if (ret)
goto out_free_queues;
+ if (lport->dev)
+ ctrl->ctrl.numa_node = dev_to_node(lport->dev);
- /* at this point, teardown path changes to ref counting on nvme ctrl */
+ return ctrl;
+
+out_free_queues:
+ kfree(ctrl->queues);
+out_free_ida:
+ put_device(ctrl->dev);
+ ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
+out_free_ctrl:
+ kfree(ctrl);
+out_fail:
+ /* exit via here doesn't follow ctlr ref points */
+ return ERR_PTR(ret);
+}
+
+static struct nvme_ctrl *
+nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+ struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
+{
+ struct nvme_fc_ctrl *ctrl;
+ unsigned long flags;
+ int ret;
+
+ ctrl = nvme_fc_alloc_ctrl(dev, opts, lport, rport);
+ if (IS_ERR(ctrl))
+ return ERR_CAST(ctrl);
+
+ ret = nvme_add_ctrl(&ctrl->ctrl);
+ if (ret)
+ goto out_put_ctrl;
ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
&nvme_fc_admin_mq_ops,
@@ -3585,8 +3596,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
flush_delayed_work(&ctrl->connect_work);
dev_info(ctrl->ctrl.device,
- "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
- ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));
+ "NVME-FC{%d}: new ctrl: NQN \"%s\", hostnqn: %s\n",
+ ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl), opts->host->nqn);
return &ctrl->ctrl;
@@ -3601,6 +3612,7 @@ fail_ctrl:
/* initiate nvme ctrl ref counting teardown */
nvme_uninit_ctrl(&ctrl->ctrl);
+out_put_ctrl:
/* Remove core ctrl ref. */
nvme_put_ctrl(&ctrl->ctrl);
@@ -3614,20 +3626,8 @@ fail_ctrl:
nvme_fc_rport_get(rport);
return ERR_PTR(-EIO);
-
-out_free_queues:
- kfree(ctrl->queues);
-out_free_ida:
- put_device(ctrl->dev);
- ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
-out_free_ctrl:
- kfree(ctrl);
-out_fail:
- /* exit via here doesn't follow ctlr ref points */
- return ERR_PTR(ret);
}
-
struct nvmet_fc_traddr {
u64 nn;
u64 pn;
@@ -3904,10 +3904,6 @@ static int __init nvme_fc_init_module(void)
{
int ret;
- nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
- if (!nvme_fc_wq)
- return -ENOMEM;
-
/*
* NOTE:
* It is expected that in the future the kernel will combine
@@ -3925,7 +3921,7 @@ static int __init nvme_fc_init_module(void)
ret = class_register(&fc_class);
if (ret) {
pr_err("couldn't register class fc\n");
- goto out_destroy_wq;
+ return ret;
}
/*
@@ -3949,8 +3945,6 @@ out_destroy_device:
device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
class_unregister(&fc_class);
-out_destroy_wq:
- destroy_workqueue(nvme_fc_wq);
return ret;
}
@@ -3970,48 +3964,27 @@ nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
spin_unlock(&rport->lock);
}
-static void
-nvme_fc_cleanup_for_unload(void)
+static void __exit nvme_fc_exit_module(void)
{
struct nvme_fc_lport *lport;
struct nvme_fc_rport *rport;
-
- list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
- list_for_each_entry(rport, &lport->endp_list, endp_list) {
- nvme_fc_delete_controllers(rport);
- }
- }
-}
-
-static void __exit nvme_fc_exit_module(void)
-{
unsigned long flags;
- bool need_cleanup = false;
spin_lock_irqsave(&nvme_fc_lock, flags);
- nvme_fc_waiting_to_unload = true;
- if (!list_empty(&nvme_fc_lport_list)) {
- need_cleanup = true;
- nvme_fc_cleanup_for_unload();
- }
+ list_for_each_entry(lport, &nvme_fc_lport_list, port_list)
+ list_for_each_entry(rport, &lport->endp_list, endp_list)
+ nvme_fc_delete_controllers(rport);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
- if (need_cleanup) {
- pr_info("%s: waiting for ctlr deletes\n", __func__);
- wait_for_completion(&nvme_fc_unload_proceed);
- pr_info("%s: ctrl deletes complete\n", __func__);
- }
+ flush_workqueue(nvme_delete_wq);
nvmf_unregister_transport(&nvme_fc_transport);
- ida_destroy(&nvme_fc_local_port_cnt);
- ida_destroy(&nvme_fc_ctrl_cnt);
-
device_destroy(&fc_class, MKDEV(0, 0));
class_unregister(&fc_class);
- destroy_workqueue(nvme_fc_wq);
}
module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);
+MODULE_DESCRIPTION("NVMe host FC transport driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
index 316f3e4ca7cc..8df73a0b3980 100644
--- a/drivers/nvme/host/hwmon.c
+++ b/drivers/nvme/host/hwmon.c
@@ -187,7 +187,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
return 0;
}
-static const struct hwmon_channel_info *nvme_hwmon_info[] = {
+static const struct hwmon_channel_info *const nvme_hwmon_info[] = {
HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
HWMON_CHANNEL_INFO(temp,
HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index d39f3219358b..b9b79ccfabf8 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -3,9 +3,10 @@
* Copyright (c) 2011-2014, Intel Corporation.
* Copyright (c) 2017-2021 Christoph Hellwig.
*/
+#include <linux/blk-integrity.h>
#include <linux/ptrace.h> /* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
-#include <linux/io_uring.h>
+#include <linux/io_uring/cmd.h>
#include "nvme.h"
enum {
@@ -18,15 +19,12 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
{
u32 effects;
- if (capable(CAP_SYS_ADMIN))
- return true;
-
/*
* Do not allow unprivileged passthrough on partitions, as that allows an
* escape from the containment of the partition.
*/
if (flags & NVME_IOCTL_PARTITION)
- return false;
+ goto admin;
/*
* Do not allow unprivileged processes to send vendor specific or fabrics
@@ -34,7 +32,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
*/
if (c->common.opcode >= nvme_cmd_vendor_start ||
c->common.opcode == nvme_fabrics_command)
- return false;
+ goto admin;
/*
* Do not allow unprivileged passthrough of admin commands except
@@ -53,7 +51,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
return true;
}
}
- return false;
+ goto admin;
}
/*
@@ -63,7 +61,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
*/
effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
if (!(effects & NVME_CMD_EFFECTS_CSUPP))
- return false;
+ goto admin;
/*
* Don't allow passthrough for command that have intrusive (or unknown)
@@ -72,16 +70,20 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
NVME_CMD_EFFECTS_UUID_SEL |
NVME_CMD_EFFECTS_SCOPE_MASK))
- return false;
+ goto admin;
/*
* Only allow I/O commands that transfer data to the controller or that
* change the logical block contents if the file descriptor is open for
* writing.
*/
- if (nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC))
- return open_for_write;
+ if ((nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC)) &&
+ !open_for_write)
+ goto admin;
+
return true;
+admin:
+ return capable(CAP_SYS_ADMIN);
}
/*
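The net effect of this hunk: every check that used to fail outright now
funnels to a single exit where CAP_SYS_ADMIN is consulted, so privileged
callers keep working while unprivileged ones are filtered. Schematically
(a reduced, illustrative shape of the control flow):

static bool demo_allowed(bool restricted)
{
	if (restricted)
		goto admin;
	return true;
admin:
	/* root may still issue the command */
	return capable(CAP_SYS_ADMIN);
}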
@@ -96,55 +98,6 @@ static void __user *nvme_to_user_ptr(uintptr_t ptrval)
return (void __user *)ptrval;
}
-static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
- unsigned len, u32 seed)
-{
- struct bio_integrity_payload *bip;
- int ret = -ENOMEM;
- void *buf;
- struct bio *bio = req->bio;
-
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- goto out;
-
- ret = -EFAULT;
- if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len))
- goto out_free_meta;
-
- bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
- if (IS_ERR(bip)) {
- ret = PTR_ERR(bip);
- goto out_free_meta;
- }
-
- bip->bip_iter.bi_size = len;
- bip->bip_iter.bi_sector = seed;
- ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
- offset_in_page(buf));
- if (ret != len) {
- ret = -ENOMEM;
- goto out_free_meta;
- }
-
- req->cmd_flags |= REQ_INTEGRITY;
- return buf;
-out_free_meta:
- kfree(buf);
-out:
- return ERR_PTR(ret);
-}
-
-static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
- void *meta, unsigned len, int ret)
-{
- if (!ret && req_op(req) == REQ_OP_DRV_IN &&
- copy_to_user(ubuf, meta, len))
- ret = -EFAULT;
- kfree(meta);
- return ret;
-}
-
static struct request *nvme_alloc_user_request(struct request_queue *q,
struct nvme_command *cmd, blk_opf_t rq_flags,
blk_mq_req_flags_t blk_flags)
@@ -161,16 +114,19 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
static int nvme_map_user_request(struct request *req, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
- unsigned int flags)
+ u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags)
{
struct request_queue *q = req->q;
struct nvme_ns *ns = q->queuedata;
struct block_device *bdev = ns ? ns->disk->part0 : NULL;
+ bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk);
+ bool has_metadata = meta_buffer && meta_len;
struct bio *bio = NULL;
- void *meta = NULL;
int ret;
+ if (has_metadata && !supports_metadata)
+ return -EINVAL;
+
if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
struct iov_iter iter;
@@ -190,18 +146,16 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
if (ret)
goto out;
+
bio = req->bio;
if (bdev)
bio_set_dev(bio, bdev);
- if (bdev && meta_buffer && meta_len) {
- meta = nvme_add_user_metadata(req, meta_buffer, meta_len,
- meta_seed);
- if (IS_ERR(meta)) {
- ret = PTR_ERR(meta);
+ if (has_metadata) {
+ ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len,
+ meta_seed);
+ if (ret)
goto out_unmap;
- }
- *metap = meta;
}
return ret;
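Note the simplification this buys: blk_rq_integrity_map_user() attaches the
user metadata buffer directly to the request's integrity payload, so from the
driver's point of view the kernel bounce buffer previously allocated by
nvme_add_user_metadata() and the copy-back step in nvme_finish_user_metadata()
are no longer needed; the mapping is torn down when the request completes.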
@@ -222,7 +176,6 @@ static int nvme_submit_user_cmd(struct request_queue *q,
struct nvme_ns *ns = q->queuedata;
struct nvme_ctrl *ctrl;
struct request *req;
- void *meta = NULL;
struct bio *bio;
u32 effects;
int ret;
@@ -234,7 +187,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
req->timeout = timeout;
if (ubuffer && bufflen) {
ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
- meta_len, meta_seed, &meta, NULL, flags);
+ meta_len, meta_seed, NULL, flags);
if (ret)
return ret;
}
@@ -246,9 +199,6 @@ static int nvme_submit_user_cmd(struct request_queue *q,
ret = nvme_execute_rq(req, false);
if (result)
*result = le64_to_cpu(nvme_req(req)->result.u64);
- if (meta)
- ret = nvme_finish_user_metadata(req, meta_buffer, meta,
- meta_len, ret);
if (bio)
blk_rq_unmap_user(bio);
blk_mq_free_request(req);
@@ -280,10 +230,10 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
return -EINVAL;
}
- length = (io.nblocks + 1) << ns->lba_shift;
+ length = (io.nblocks + 1) << ns->head->lba_shift;
if ((io.control & NVME_RW_PRINFO_PRACT) &&
- ns->ms == sizeof(struct t10_pi_tuple)) {
+ (ns->head->ms == ns->head->pi_size)) {
/*
* Protection information is stripped/inserted by the
* controller.
@@ -293,11 +243,11 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
meta_len = 0;
metadata = NULL;
} else {
- meta_len = (io.nblocks + 1) * ns->ms;
+ meta_len = (io.nblocks + 1) * ns->head->ms;
metadata = nvme_to_user_ptr(io.metadata);
}
- if (ns->features & NVME_NS_EXT_LBAS) {
+ if (ns->head->features & NVME_NS_EXT_LBAS) {
length += meta_len;
meta_len = 0;
} else if (meta_len) {
@@ -314,8 +264,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.control = cpu_to_le16(io.control);
c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
c.rw.reftag = cpu_to_le32(io.reftag);
- c.rw.apptag = cpu_to_le16(io.apptag);
- c.rw.appmask = cpu_to_le16(io.appmask);
+ c.rw.lbat = cpu_to_le16(io.apptag);
+ c.rw.lbatm = cpu_to_le16(io.appmask);
return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
meta_len, lower_32_bits(io.slba), NULL, 0, 0);
@@ -443,19 +393,10 @@ struct nvme_uring_data {
* Expect build errors if this grows larger than that.
*/
struct nvme_uring_cmd_pdu {
- union {
- struct bio *bio;
- struct request *req;
- };
- u32 meta_len;
- u32 nvme_status;
- union {
- struct {
- void *meta; /* kernel-resident buffer */
- void __user *meta_buffer;
- };
- u64 result;
- } u;
+ struct request *req;
+ struct bio *bio;
+ u64 result;
+ int status;
};
static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
@@ -464,31 +405,6 @@ static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}
-static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd,
- unsigned issue_flags)
-{
- struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
- struct request *req = pdu->req;
- int status;
- u64 result;
-
- if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
- status = -EINTR;
- else
- status = nvme_req(req)->status;
-
- result = le64_to_cpu(nvme_req(req)->result.u64);
-
- if (pdu->meta_len)
- status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
- pdu->u.meta, pdu->meta_len, status);
- if (req->bio)
- blk_rq_unmap_user(req->bio);
- blk_mq_free_request(req);
-
- io_uring_cmd_done(ioucmd, status, result, issue_flags);
-}
-
static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
unsigned issue_flags)
{
@@ -496,8 +412,7 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
if (pdu->bio)
blk_rq_unmap_user(pdu->bio);
-
- io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags);
+ io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
}
static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
@@ -506,20 +421,24 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
struct io_uring_cmd *ioucmd = req->end_io_data;
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
- req->bio = pdu->bio;
if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
- pdu->nvme_status = -EINTR;
+ pdu->status = -EINTR;
else
- pdu->nvme_status = nvme_req(req)->status;
- pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
+ pdu->status = nvme_req(req)->status;
+ pdu->result = le64_to_cpu(nvme_req(req)->result.u64);
/*
- * For iopoll, complete it directly.
+ * For iopoll, complete it directly. Note that using the uring_cmd
+ * helper for this is safe only because we check blk_rq_is_poll().
+ * Since that returns false if we're NOT on a polled queue, it's
+ * safe to use the polled completion helper here.
+ *
* Otherwise, move the completion to task work.
*/
if (blk_rq_is_poll(req)) {
- WRITE_ONCE(ioucmd->cookie, NULL);
- nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
+ if (pdu->bio)
+ blk_rq_unmap_user(pdu->bio);
+ io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
} else {
io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
}
@@ -527,29 +446,6 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
return RQ_END_IO_FREE;
}
-static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
- blk_status_t err)
-{
- struct io_uring_cmd *ioucmd = req->end_io_data;
- struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
-
- req->bio = pdu->bio;
- pdu->req = req;
-
- /*
- * For iopoll, complete it directly.
- * Otherwise, move the completion to task work.
- */
- if (blk_rq_is_poll(req)) {
- WRITE_ONCE(ioucmd->cookie, NULL);
- nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
- } else {
- io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_meta_cb);
- }
-
- return RQ_END_IO_NONE;
-}
-
static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
{
@@ -561,7 +457,6 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct request *req;
blk_opf_t rq_flags = REQ_ALLOC_CACHE;
blk_mq_req_flags_t blk_flags = 0;
- void *meta = NULL;
int ret;
c.common.opcode = READ_ONCE(cmd->opcode);
@@ -609,27 +504,16 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
if (d.addr && d.data_len) {
ret = nvme_map_user_request(req, d.addr,
d.data_len, nvme_to_user_ptr(d.metadata),
- d.metadata_len, 0, &meta, ioucmd, vec);
+ d.metadata_len, 0, ioucmd, vec);
if (ret)
return ret;
}
- if (blk_rq_is_poll(req)) {
- ioucmd->flags |= IORING_URING_CMD_POLLED;
- WRITE_ONCE(ioucmd->cookie, req);
- }
-
/* to free bio on completion, as req->bio will be null at that time */
pdu->bio = req->bio;
- pdu->meta_len = d.metadata_len;
+ pdu->req = req;
req->end_io_data = ioucmd;
- if (pdu->meta_len) {
- pdu->u.meta = meta;
- pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata);
- req->end_io = nvme_uring_cmd_end_io_meta;
- } else {
- req->end_io = nvme_uring_cmd_end_io;
- }
+ req->end_io = nvme_uring_cmd_end_io;
blk_execute_rq_nowait(req, false);
return -EIOCBQUEUED;
}
@@ -780,16 +664,12 @@ int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
struct io_comp_batch *iob,
unsigned int poll_flags)
{
- struct request *req;
- int ret = 0;
-
- if (!(ioucmd->flags & IORING_URING_CMD_POLLED))
- return 0;
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ struct request *req = pdu->req;
- req = READ_ONCE(ioucmd->cookie);
if (req && blk_rq_is_poll(req))
- ret = blk_rq_poll(req, iob, poll_flags);
- return ret;
+ return blk_rq_poll(req, iob, poll_flags);
+ return 0;
}
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
@@ -914,15 +794,15 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
bool open_for_write)
{
struct nvme_ns *ns;
- int ret;
+ int ret, srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
if (list_empty(&ctrl->namespaces)) {
ret = -ENOTTY;
goto out_unlock;
}
- ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
+ ns = list_first_or_null_rcu(&ctrl->namespaces, struct nvme_ns, list);
if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
dev_warn(ctrl->device,
"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
@@ -932,15 +812,18 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
dev_warn(ctrl->device,
"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
- kref_get(&ns->kref);
- up_read(&ctrl->namespaces_rwsem);
+ if (!nvme_get_ns(ns)) {
+ ret = -ENXIO;
+ goto out_unlock;
+ }
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write);
nvme_put_ns(ns);
return ret;
out_unlock:
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
return ret;
}
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 0a88d7bdc5e3..48e7a8906d01 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -17,6 +17,7 @@ MODULE_PARM_DESC(multipath,
static const char *nvme_iopolicy_names[] = {
[NVME_IOPOLICY_NUMA] = "numa",
[NVME_IOPOLICY_RR] = "round-robin",
+ [NVME_IOPOLICY_QD] = "queue-depth",
};
static int iopolicy = NVME_IOPOLICY_NUMA;
@@ -29,6 +30,8 @@ static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
iopolicy = NVME_IOPOLICY_NUMA;
else if (!strncmp(val, "round-robin", 11))
iopolicy = NVME_IOPOLICY_RR;
+ else if (!strncmp(val, "queue-depth", 11))
+ iopolicy = NVME_IOPOLICY_QD;
else
return -EINVAL;
@@ -43,7 +46,7 @@ static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
&iopolicy, 0644);
MODULE_PARM_DESC(iopolicy,
- "Default multipath I/O policy; 'numa' (default) or 'round-robin'");
+ "Default multipath I/O policy; 'numa' (default), 'round-robin' or 'queue-depth'");
void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
@@ -83,7 +86,7 @@ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
void nvme_failover_req(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
- u16 status = nvme_req(req)->status & 0x7ff;
+ u16 status = nvme_req(req)->status & NVME_SCT_SC_MASK;
unsigned long flags;
struct bio *bio;
@@ -118,7 +121,8 @@ void nvme_failover_req(struct request *req)
blk_steal_bios(&ns->head->requeue_list, req);
spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
- blk_mq_end_request(req, 0);
+ nvme_req(req)->status = 0;
+ nvme_end_req(req);
kblockd_schedule_work(&ns->head->requeue_work);
}
@@ -127,6 +131,11 @@ void nvme_mpath_start_request(struct request *rq)
struct nvme_ns *ns = rq->q->queuedata;
struct gendisk *disk = ns->head->disk;
+ if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) {
+ atomic_inc(&ns->ctrl->nr_active);
+ nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE;
+ }
+
if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
return;
@@ -140,6 +149,9 @@ void nvme_mpath_end_request(struct request *rq)
{
struct nvme_ns *ns = rq->q->queuedata;
+ if (nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)
+ atomic_dec_if_positive(&ns->ctrl->nr_active);
+
if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
return;
bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
@@ -150,16 +162,17 @@ void nvme_mpath_end_request(struct request *rq)
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
if (!ns->head->disk)
continue;
kblockd_schedule_work(&ns->head->requeue_work);
- if (ctrl->state == NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
disk_uevent(ns->head->disk, KOBJ_CHANGE);
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
static const char *nvme_ana_state_names[] = {
@@ -193,13 +206,14 @@ out:
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
nvme_mpath_clear_current_path(ns);
kblockd_schedule_work(&ns->head->requeue_work);
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
@@ -223,13 +237,14 @@ void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
+ enum nvme_ctrl_state state = nvme_ctrl_state(ns->ctrl);
+
/*
* We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
* still be able to complete assuming that the controller is connected.
* Otherwise it will fail immediately and return to the requeue list.
*/
- if (ns->ctrl->state != NVME_CTRL_LIVE &&
- ns->ctrl->state != NVME_CTRL_DELETING)
+ if (state != NVME_CTRL_LIVE && state != NVME_CTRL_DELETING)
return true;
if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
!test_bit(NVME_NS_READY, &ns->flags))
@@ -246,7 +261,8 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
if (nvme_path_is_disabled(ns))
continue;
- if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
+ if (ns->ctrl->numa_node != NUMA_NO_NODE &&
+ READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
distance = node_distance(node, ns->ctrl->numa_node);
else
distance = LOCAL_DISTANCE;
@@ -286,10 +302,15 @@ static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
}
-static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
- int node, struct nvme_ns *old)
+static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head)
{
struct nvme_ns *ns, *found = NULL;
+ int node = numa_node_id();
+ struct nvme_ns *old = srcu_dereference(head->current_path[node],
+ &head->srcu);
+
+ if (unlikely(!old))
+ return __nvme_find_path(head, node);
if (list_is_singular(&head->list)) {
if (nvme_path_is_disabled(old))
@@ -329,13 +350,49 @@ out:
return found;
}
+static struct nvme_ns *nvme_queue_depth_path(struct nvme_ns_head *head)
+{
+ struct nvme_ns *best_opt = NULL, *best_nonopt = NULL, *ns;
+ unsigned int min_depth_opt = UINT_MAX, min_depth_nonopt = UINT_MAX;
+ unsigned int depth;
+
+ list_for_each_entry_rcu(ns, &head->list, siblings) {
+ if (nvme_path_is_disabled(ns))
+ continue;
+
+ depth = atomic_read(&ns->ctrl->nr_active);
+
+ switch (ns->ana_state) {
+ case NVME_ANA_OPTIMIZED:
+ if (depth < min_depth_opt) {
+ min_depth_opt = depth;
+ best_opt = ns;
+ }
+ break;
+ case NVME_ANA_NONOPTIMIZED:
+ if (depth < min_depth_nonopt) {
+ min_depth_nonopt = depth;
+ best_nonopt = ns;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (min_depth_opt == 0)
+ return best_opt;
+ }
+
+ return best_opt ? best_opt : best_nonopt;
+}
+
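A worked example of the selection logic: with optimized paths at depths 4 and
2 and a non-optimized path at depth 0, the loop returns the depth-2 optimized
path; non-optimized paths are considered only when no usable optimized path
exists. The min_depth_opt == 0 test is an early exit, since an idle optimized
path cannot be beaten.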
static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
- return ns->ctrl->state == NVME_CTRL_LIVE &&
+ return nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE &&
ns->ana_state == NVME_ANA_OPTIMIZED;
}
-inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
+static struct nvme_ns *nvme_numa_path(struct nvme_ns_head *head)
{
int node = numa_node_id();
struct nvme_ns *ns;
@@ -343,22 +400,34 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
ns = srcu_dereference(head->current_path[node], &head->srcu);
if (unlikely(!ns))
return __nvme_find_path(head, node);
-
- if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
- return nvme_round_robin_path(head, node, ns);
if (unlikely(!nvme_path_is_optimized(ns)))
return __nvme_find_path(head, node);
return ns;
}
+inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
+{
+ switch (READ_ONCE(head->subsys->iopolicy)) {
+ case NVME_IOPOLICY_QD:
+ return nvme_queue_depth_path(head);
+ case NVME_IOPOLICY_RR:
+ return nvme_round_robin_path(head);
+ default:
+ return nvme_numa_path(head);
+ }
+}
+
static bool nvme_available_path(struct nvme_ns_head *head)
{
struct nvme_ns *ns;
+ if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
+ return false;
+
list_for_each_entry_rcu(ns, &head->list, siblings) {
if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
continue;
- switch (ns->ctrl->state) {
+ switch (nvme_ctrl_state(ns->ctrl)) {
case NVME_CTRL_LIVE:
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
@@ -422,6 +491,21 @@ static void nvme_ns_head_release(struct gendisk *disk)
nvme_put_ns_head(disk->private_data);
}
+static int nvme_ns_head_get_unique_id(struct gendisk *disk, u8 id[16],
+ enum blk_unique_id type)
+{
+ struct nvme_ns_head *head = disk->private_data;
+ struct nvme_ns *ns;
+ int srcu_idx, ret = -EWOULDBLOCK;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ ns = nvme_find_path(head);
+ if (ns)
+ ret = nvme_ns_get_unique_id(ns, id, type);
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+
#ifdef CONFIG_BLK_DEV_ZONED
static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data)
@@ -449,6 +533,7 @@ const struct block_device_operations nvme_ns_head_ops = {
.ioctl = nvme_ns_head_ioctl,
.compat_ioctl = blkdev_compat_ptr_ioctl,
.getgeo = nvme_getgeo,
+ .get_unique_id = nvme_ns_head_get_unique_id,
.report_zones = nvme_ns_head_report_zones,
.pr_ops = &nvme_pr_ops,
};
@@ -515,7 +600,7 @@ static void nvme_requeue_work(struct work_struct *work)
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
- bool vwc = false;
+ struct queue_limits lim;
mutex_init(&head->lock);
bio_list_init(&head->requeue_list);
@@ -531,36 +616,21 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
!nvme_is_unique_nsid(ctrl, head) || !multipath)
return 0;
- head->disk = blk_alloc_disk(ctrl->numa_node);
- if (!head->disk)
- return -ENOMEM;
+ blk_set_stacking_limits(&lim);
+ lim.dma_alignment = 3;
+ lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL;
+ if (head->ids.csi == NVME_CSI_ZNS)
+ lim.features |= BLK_FEAT_ZONED;
+ else
+ lim.max_zone_append_sectors = 0;
+
+ head->disk = blk_alloc_disk(&lim, ctrl->numa_node);
+ if (IS_ERR(head->disk))
+ return PTR_ERR(head->disk);
head->disk->fops = &nvme_ns_head_ops;
head->disk->private_data = head;
sprintf(head->disk->disk_name, "nvme%dn%d",
ctrl->subsys->instance, head->instance);
-
- blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
- blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
- blk_queue_flag_set(QUEUE_FLAG_IO_STAT, head->disk->queue);
- /*
- * This assumes all controllers that refer to a namespace either
- * support poll queues or not. That is not a strict guarantee,
- * but if the assumption is wrong the effect is only suboptimal
- * performance but not correctness problem.
- */
- if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL &&
- ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues)
- blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue);
-
- /* set to a default value of 512 until the disk is validated */
- blk_queue_logical_block_size(head->disk->queue, 512);
- blk_set_stacking_limits(&head->disk->queue->limits);
- blk_queue_dma_alignment(head->disk->queue, 3);
-
- /* we need to propagate up the VMC settings */
- if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
- vwc = true;
- blk_queue_write_cache(head->disk->queue, vwc, vwc);
return 0;
}
@@ -579,9 +649,9 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
*/
if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
rc = device_add_disk(&head->subsys->dev, head->disk,
- nvme_ns_id_attr_groups);
+ nvme_ns_attr_groups);
if (rc) {
- clear_bit(NVME_NSHEAD_DISK_LIVE, &ns->flags);
+ clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
return;
}
nvme_add_ns_head_cdev(head);
@@ -592,7 +662,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
int node, srcu_idx;
srcu_idx = srcu_read_lock(&head->srcu);
- for_each_node(node)
+ for_each_online_node(node)
__nvme_find_path(head, node);
srcu_read_unlock(&head->srcu, srcu_idx);
}
@@ -667,7 +737,7 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
* controller is ready.
*/
if (nvme_state_is_live(ns->ana_state) &&
- ns->ctrl->state == NVME_CTRL_LIVE)
+ nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
nvme_mpath_set_live(ns);
}
@@ -677,6 +747,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
unsigned *nr_change_groups = data;
struct nvme_ns *ns;
+ int srcu_idx;
dev_dbg(ctrl->device, "ANA group %d: %s.\n",
le32_to_cpu(desc->grpid),
@@ -688,8 +759,8 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
if (!nr_nsids)
return 0;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
unsigned nsid;
again:
nsid = le32_to_cpu(desc->nsids[n]);
@@ -702,7 +773,7 @@ again:
if (ns->head->ns_id > nsid)
goto again;
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
return 0;
}
@@ -748,7 +819,7 @@ static void nvme_ana_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
- if (ctrl->state != NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
return;
nvme_read_ana_log(ctrl);
@@ -796,6 +867,29 @@ static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
}
+static void nvme_subsys_iopolicy_update(struct nvme_subsystem *subsys,
+ int iopolicy)
+{
+ struct nvme_ctrl *ctrl;
+ int old_iopolicy = READ_ONCE(subsys->iopolicy);
+
+ if (old_iopolicy == iopolicy)
+ return;
+
+ WRITE_ONCE(subsys->iopolicy, iopolicy);
+
+ /* iopolicy changes clear the mpath by design */
+ mutex_lock(&nvme_subsystems_lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ nvme_mpath_clear_ctrl_paths(ctrl);
+ mutex_unlock(&nvme_subsystems_lock);
+
+ pr_notice("subsysnqn %s iopolicy changed from %s to %s\n",
+ subsys->subnqn,
+ nvme_iopolicy_names[old_iopolicy],
+ nvme_iopolicy_names[iopolicy]);
+}
+
static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -805,7 +899,7 @@ static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
- WRITE_ONCE(subsys->iopolicy, i);
+ nvme_subsys_iopolicy_update(subsys, i);
return count;
}
}
@@ -868,9 +962,6 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
nvme_mpath_set_live(ns);
}
- if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
- ns->head->disk->queue);
#ifdef CONFIG_BLK_DEV_ZONED
if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
ns->head->disk->nr_zones = ns->disk->nr_zones;
@@ -881,11 +972,16 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
if (!head->disk)
return;
- kblockd_schedule_work(&head->requeue_work);
- if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
+ if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
nvme_cdev_del(&head->cdev, &head->cdev_device);
del_gendisk(head->disk);
}
+ /*
+ * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared
+ * to allow multipath to fail all I/O.
+ */
+ synchronize_srcu(&head->srcu);
+ kblockd_schedule_work(&head->requeue_work);
}
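The clear-then-synchronize ordering above is the standard SRCU retire idiom:
clear the flag that readers test under srcu_read_lock(), wait out any reader
that may have sampled it as set, and only then requeue I/O. A generic sketch
(demo_* names are illustrative):

#define DEMO_LIVE 0

struct demo_obj {
	unsigned long flags;		/* bit DEMO_LIVE is tested by readers */
	struct srcu_struct srcu;
};

static void demo_retire(struct demo_obj *obj)
{
	clear_bit(DEMO_LIVE, &obj->flags);
	/* wait for readers that might still see DEMO_LIVE set */
	synchronize_srcu(&obj->srcu);
	/* from here on, every srcu reader observes DEMO_LIVE clear,
	 * so requeued I/O is failed instead of retried forever */
}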
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
@@ -916,6 +1012,9 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
!(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
return 0;
+ /* initialize this in the identify path to cover controller resets */
+ atomic_set(&ctrl->nr_active, 0);
+
if (!ctrl->max_namespaces ||
ctrl->max_namespaces > le32_to_cpu(id->nn)) {
dev_err(ctrl->device,
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index f35647c470af..313a4f978a2c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -16,6 +16,7 @@
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>
+#include <linux/ratelimit_types.h>
#include <trace/events/block.h>
@@ -48,6 +49,7 @@ extern unsigned int admin_timeout;
extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;
+extern struct mutex nvme_subsystems_lock;
/*
* List of workarounds for devices that required behavior not specified in
@@ -89,6 +91,11 @@ enum nvme_quirks {
NVME_QUIRK_NO_DEEPEST_PS = (1 << 5),
/*
+ * Problems seen with concurrent commands
+ */
+ NVME_QUIRK_QDEPTH_ONE = (1 << 6),
+
+ /*
* Set MEDIUM priority on SQ creation
*/
NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7),
@@ -156,6 +163,16 @@ enum nvme_quirks {
* No temperature thresholds for channels other than 0 (Composite).
*/
NVME_QUIRK_NO_SECONDARY_TEMP_THRESH = (1 << 19),
+
+ /*
+ * Disables simple suspend/resume path.
+ */
+ NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND = (1 << 20),
+
+ /*
+ * MSI (but not MSI-X) interrupts are broken and never fire.
+ */
+ NVME_QUIRK_BROKEN_MSI = (1 << 21),
};
/*
@@ -184,6 +201,7 @@ enum {
NVME_REQ_CANCELLED = (1 << 0),
NVME_REQ_USERCMD = (1 << 1),
NVME_MPATH_IO_STATS = (1 << 2),
+ NVME_MPATH_CNT_ACTIVE = (1 << 3),
};
static inline struct nvme_request *nvme_req(struct request *req)
@@ -251,11 +269,13 @@ enum nvme_ctrl_flags {
NVME_CTRL_STOPPED = 3,
NVME_CTRL_SKIP_ID_CNS_CS = 4,
NVME_CTRL_DIRTY_CAPABILITY = 5,
+ NVME_CTRL_FROZEN = 6,
};
struct nvme_ctrl {
bool comp_seen;
bool identified;
+ bool passthru_err_log_enabled;
enum nvme_ctrl_state state;
spinlock_t lock;
struct mutex scan_lock;
@@ -269,7 +289,8 @@ struct nvme_ctrl {
struct blk_mq_tag_set *tagset;
struct blk_mq_tag_set *admin_tagset;
struct list_head namespaces;
- struct rw_semaphore namespaces_rwsem;
+ struct mutex namespaces_lock;
+ struct srcu_struct srcu;
struct device ctrl_device;
struct device *device; /* char device */
#ifdef CONFIG_NVME_HWMON
@@ -285,7 +306,6 @@ struct nvme_ctrl {
struct opal_dev *opal_dev;
- char name[12];
u16 cntlid;
u16 mtfa;
@@ -296,14 +316,13 @@ struct nvme_ctrl {
u32 max_hw_sectors;
u32 max_segments;
u32 max_integrity_segments;
- u32 max_discard_sectors;
- u32 max_discard_segments;
u32 max_zeroes_sectors;
#ifdef CONFIG_BLK_DEV_ZONED
u32 max_zone_append;
#endif
u16 crdt[3];
u16 oncs;
+ u8 dmrl;
u32 dmrsl;
u16 oacs;
u16 sqsize;
@@ -347,9 +366,10 @@ struct nvme_ctrl {
size_t ana_log_size;
struct timer_list anatt_timer;
struct work_struct ana_work;
+ atomic_t nr_active;
#endif
-#ifdef CONFIG_NVME_AUTH
+#ifdef CONFIG_NVME_HOST_AUTH
struct work_struct dhchap_auth_work;
struct mutex dhchap_auth_mutex;
struct nvme_dhchap_queue_context *dhchap_ctxs;
@@ -357,6 +377,7 @@ struct nvme_ctrl {
struct nvme_dhchap_key *ctrl_key;
u16 transaction;
#endif
+ key_serial_t tls_pskid;
/* Power saving configuration */
u64 ps_max_latency_us;
@@ -386,9 +407,15 @@ struct nvme_ctrl {
enum nvme_dctype dctype;
};
+static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
+{
+ return READ_ONCE(ctrl->state);
+}
+
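This accessor is what the many "ctrl->state ==" conversions in this series
call: taking one READ_ONCE() snapshot means consecutive tests within a
function cannot observe two different states should a concurrent
nvme_change_ctrl_state() run. Typical shape of a caller (illustrative):

static bool demo_ctrl_usable(struct nvme_ctrl *ctrl)
{
	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);

	/* both comparisons test the same snapshot */
	return state == NVME_CTRL_LIVE || state == NVME_CTRL_DELETING;
}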
enum nvme_iopolicy {
NVME_IOPOLICY_NUMA,
NVME_IOPOLICY_RR,
+ NVME_IOPOLICY_QD,
};
struct nvme_subsystem {
@@ -438,13 +465,26 @@ struct nvme_ns_head {
struct list_head list;
struct srcu_struct srcu;
struct nvme_subsystem *subsys;
- unsigned ns_id;
struct nvme_ns_ids ids;
+ u8 lba_shift;
+ u16 ms;
+ u16 pi_size;
+ u8 pi_type;
+ u8 guard_type;
struct list_head entry;
struct kref ref;
bool shared;
- int instance;
+ bool passthru_err_log_enabled;
struct nvme_effects_log *effects;
+ u64 nuse;
+ unsigned ns_id;
+ int instance;
+#ifdef CONFIG_BLK_DEV_ZONED
+ u64 zsze;
+#endif
+ unsigned long features;
+
+ struct ratelimit_state rs_nuse;
struct cdev cdev;
struct device cdev_device;
@@ -469,7 +509,7 @@ static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
enum nvme_ns_features {
NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
- NVME_NS_DEAC, /* DEAC bit in Write Zeores supported */
+ NVME_NS_DEAC = 1 << 2, /* DEAC bit in Write Zeroes supported */
};
struct nvme_ns {
@@ -486,17 +526,6 @@ struct nvme_ns {
struct kref kref;
struct nvme_ns_head *head;
- int lba_shift;
- u16 ms;
- u16 pi_size;
- u16 sgs;
- u32 sws;
- u8 pi_type;
- u8 guard_type;
-#ifdef CONFIG_BLK_DEV_ZONED
- u64 zsze;
-#endif
- unsigned long features;
unsigned long flags;
#define NVME_NS_REMOVING 0
#define NVME_NS_ANA_PENDING 2
@@ -507,13 +536,12 @@ struct nvme_ns {
struct device cdev_device;
struct nvme_fault_inject fault_inject;
-
};
/* NVMe ns supports metadata actions by the controller (generate/strip) */
-static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
+static inline bool nvme_ns_has_pi(struct nvme_ns_head *head)
{
- return ns->pi_type && ns->ms == ns->pi_size;
+ return head->pi_type && head->ms == head->pi_size;
}
struct nvme_ctrl_ops {
@@ -530,6 +558,7 @@ struct nvme_ctrl_ops {
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
void (*free_ctrl)(struct nvme_ctrl *ctrl);
void (*submit_async_event)(struct nvme_ctrl *ctrl);
+ int (*subsystem_reset)(struct nvme_ctrl *ctrl);
void (*delete_ctrl)(struct nvme_ctrl *ctrl);
void (*stop_ctrl)(struct nvme_ctrl *ctrl);
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
@@ -628,34 +657,25 @@ int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
- int ret;
-
- if (!ctrl->subsystem)
+ if (!ctrl->subsystem || !ctrl->ops->subsystem_reset)
return -ENOTTY;
- if (!nvme_wait_reset(ctrl))
- return -EBUSY;
-
- ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
- if (ret)
- return ret;
-
- return nvme_try_sched_reset(ctrl);
+ return ctrl->ops->subsystem_reset(ctrl);
}
/*
* Convert a 512B sector number to a device logical block number.
*/
-static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
+static inline u64 nvme_sect_to_lba(struct nvme_ns_head *head, sector_t sector)
{
- return sector >> (ns->lba_shift - SECTOR_SHIFT);
+ return sector >> (head->lba_shift - SECTOR_SHIFT);
}
/*
* Convert a device logical block number to a 512B sector number.
*/
-static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
+static inline sector_t nvme_lba_to_sect(struct nvme_ns_head *head, u64 lba)
{
- return lba << (ns->lba_shift - SECTOR_SHIFT);
+ return lba << (head->lba_shift - SECTOR_SHIFT);
}
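
A quick worked example of this shift arithmetic, assuming a hypothetical 4096-byte LBA format (lba_shift = 12) and the usual SECTOR_SHIFT of 9: eight 512-byte sectors map to one logical block. Standalone sketch:

#include <assert.h>
#include <stdint.h>

#define SECTOR_SHIFT 9

static uint64_t demo_sect_to_lba(unsigned lba_shift, uint64_t sector)
{
        return sector >> (lba_shift - SECTOR_SHIFT);
}

static uint64_t demo_lba_to_sect(unsigned lba_shift, uint64_t lba)
{
        return lba << (lba_shift - SECTOR_SHIFT);
}

int main(void)
{
        /* 4096B blocks: eight 512B sectors per logical block. */
        assert(demo_sect_to_lba(12, 8) == 1);
        assert(demo_lba_to_sect(12, 1) == 8);
        return 0;
}
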
/*
@@ -668,7 +688,7 @@ static inline u32 nvme_bytes_to_numd(size_t len)
static inline bool nvme_is_ana_error(u16 status)
{
- switch (status & 0x7ff) {
+ switch (status & NVME_SCT_SC_MASK) {
case NVME_SC_ANA_TRANSITION:
case NVME_SC_ANA_INACCESSIBLE:
case NVME_SC_ANA_PERSISTENT_LOSS:
@@ -681,7 +701,7 @@ static inline bool nvme_is_ana_error(u16 status)
static inline bool nvme_is_path_error(u16 status)
{
/* check for a status code type of 'path related status' */
- return (status & 0x700) == 0x300;
+ return (status & NVME_SCT_MASK) == NVME_SCT_PATH;
}
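
These masks encode the layout of the 16-bit status value used throughout the driver: bits 7:0 carry the Status Code (SC) and bits 10:8 the Status Code Type (SCT), which is why the old literals were 0x700 for the type and 0x7ff for type-plus-code. A minimal sketch (mask values taken from the code above; the sample status is hypothetical):

#include <assert.h>
#include <stdint.h>

#define NVME_SCT_MASK    0x700  /* Status Code Type, bits 10:8 */
#define NVME_SCT_SC_MASK 0x7ff  /* SCT plus Status Code, bits 10:0 */
#define NVME_SCT_PATH    0x300  /* 'path related status' type */

int main(void)
{
        uint16_t status = 0x302;        /* a path-type status, SCT 0x3 */

        assert((status & NVME_SCT_MASK) == NVME_SCT_PATH);
        assert((status & NVME_SCT_SC_MASK) == 0x302);
        return 0;
}
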
/*
@@ -724,6 +744,28 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}
+/*
+ * Returns true for sink states that can't ever transition back to live.
+ */
+static inline bool nvme_state_terminal(struct nvme_ctrl *ctrl)
+{
+ switch (nvme_ctrl_state(ctrl)) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_LIVE:
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_CONNECTING:
+ return false;
+ case NVME_CTRL_DELETING:
+ case NVME_CTRL_DELETING_NOIO:
+ case NVME_CTRL_DEAD:
+ return true;
+ default:
+ WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
+ return true;
+ }
+}
+
+void nvme_end_req(struct request *req);
void nvme_complete_rq(struct request *req);
void nvme_complete_batch_req(struct request *req);
@@ -749,6 +791,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
const struct nvme_ctrl_ops *ops, unsigned long quirks);
+int nvme_add_ctrl(struct nvme_ctrl *ctrl);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
@@ -790,17 +833,18 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
- bool queue_live);
+ bool queue_live, enum nvme_ctrl_state state);
static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live)
{
- if (likely(ctrl->state == NVME_CTRL_LIVE))
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ if (likely(state == NVME_CTRL_LIVE))
return true;
- if (ctrl->ops->flags & NVME_F_FABRICS &&
- ctrl->state == NVME_CTRL_DELETING)
+ if (ctrl->ops->flags & NVME_F_FABRICS && state == NVME_CTRL_DELETING)
return queue_live;
- return __nvme_check_ready(ctrl, rq, queue_live);
+ return __nvme_check_ready(ctrl, rq, queue_live, state);
}
/*
@@ -821,12 +865,27 @@ static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
(ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
}
+/*
+ * Flags for __nvme_submit_sync_cmd()
+ */
+typedef __u32 __bitwise nvme_submit_flags_t;
+
+enum {
+ /* Insert request at the head of the queue */
+ NVME_SUBMIT_AT_HEAD = (__force nvme_submit_flags_t)(1 << 0),
+ /* Set BLK_MQ_REQ_NOWAIT when allocating request */
+ NVME_SUBMIT_NOWAIT = (__force nvme_submit_flags_t)(1 << 1),
+ /* Set BLK_MQ_REQ_RESERVED when allocating request */
+ NVME_SUBMIT_RESERVED = (__force nvme_submit_flags_t)(1 << 2),
+ /* Retry command when NVME_STATUS_DNR is not set in the result */
+ NVME_SUBMIT_RETRY = (__force nvme_submit_flags_t)(1 << 3),
+};
+
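
The typedef above follows the kernel's sparse-checked bitwise-flags pattern: under sparse (__CHECKER__), __bitwise makes the flag type incompatible with plain integers so mixing flag namespaces is reported, while in normal builds the annotations compile away. A userspace sketch of the same pattern, with hypothetical names:

#include <stdio.h>

#ifdef __CHECKER__
#define __demo_bitwise __attribute__((bitwise))
#define __demo_force   __attribute__((force))
#else
#define __demo_bitwise
#define __demo_force
#endif

typedef unsigned int __demo_bitwise demo_flags_t;

#define DEMO_AT_HEAD ((__demo_force demo_flags_t)(1 << 0))
#define DEMO_NOWAIT  ((__demo_force demo_flags_t)(1 << 1))

static void demo_submit(demo_flags_t flags)
{
        if (flags & DEMO_AT_HEAD)
                puts("insert at head of queue");
        if (flags & DEMO_NOWAIT)
                puts("do not wait for a free request");
}

int main(void)
{
        demo_submit(DEMO_AT_HEAD | DEMO_NOWAIT);
        return 0;
}
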
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- int qid, int at_head,
- blk_mq_req_flags_t flags);
+ int qid, nvme_submit_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
u32 *result);
@@ -861,10 +920,12 @@ int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
unsigned int issue_flags);
int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
unsigned int issue_flags);
+int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+ struct nvme_id_ns **id);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
-extern const struct attribute_group *nvme_ns_id_attr_groups[];
+extern const struct attribute_group *nvme_ns_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
extern const struct attribute_group nvme_dev_attrs_group;
@@ -914,6 +975,10 @@ extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;
+static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
+{
+ return disk->fops == &nvme_ns_head_ops;
+}
#else
#define multipath false
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
@@ -991,13 +1056,28 @@ static inline void nvme_mpath_start_request(struct request *rq)
static inline void nvme_mpath_end_request(struct request *rq)
{
}
+static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
+{
+ return false;
+}
#endif /* CONFIG_NVME_MULTIPATH */
-int nvme_revalidate_zones(struct nvme_ns *ns);
+int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16],
+ enum blk_unique_id type);
+
+struct nvme_zone_info {
+ u64 zone_size;
+ unsigned int max_open_zones;
+ unsigned int max_active_zones;
+};
+
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
+int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
+ struct nvme_zone_info *zi);
+void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
+ struct nvme_zone_info *zi);
#ifdef CONFIG_BLK_DEV_ZONED
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmnd,
enum nvme_zone_mgmt_action action);
@@ -1008,18 +1088,14 @@ static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
{
return BLK_STS_NOTSUPP;
}
-
-static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
-{
- dev_warn(ns->ctrl->device,
- "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
- return -EPROTONOSUPPORT;
-}
#endif
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
- return dev_to_disk(dev)->private_data;
+ struct gendisk *disk = dev_to_disk(dev);
+
+ WARN_ON(nvme_disk_is_ns_head(disk));
+ return disk->private_data;
}
#ifdef CONFIG_NVME_HWMON
@@ -1048,7 +1124,7 @@ static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
return ctrl->sgls & ((1 << 0) | (1 << 1));
}
-#ifdef CONFIG_NVME_AUTH
+#ifdef CONFIG_NVME_HOST_AUTH
int __init nvme_init_auth(void);
void __exit nvme_exit_auth(void);
int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
@@ -1075,7 +1151,7 @@ static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
}
static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
- return NVME_SC_AUTH_REQUIRED;
+ return -EPROTONOSUPPORT;
}
static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
#endif
@@ -1088,6 +1164,7 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
struct nvme_command *cmd, int status);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
+bool nvme_get_ns(struct nvme_ns *ns);
void nvme_put_ns(struct nvme_ns *ns);
static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
@@ -1096,35 +1173,42 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
}
#ifdef CONFIG_NVME_VERBOSE_ERRORS
-const unsigned char *nvme_get_error_status_str(u16 status);
-const unsigned char *nvme_get_opcode_str(u8 opcode);
-const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
-const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode);
+const char *nvme_get_error_status_str(u16 status);
+const char *nvme_get_opcode_str(u8 opcode);
+const char *nvme_get_admin_opcode_str(u8 opcode);
+const char *nvme_get_fabrics_opcode_str(u8 opcode);
#else /* CONFIG_NVME_VERBOSE_ERRORS */
-static inline const unsigned char *nvme_get_error_status_str(u16 status)
+static inline const char *nvme_get_error_status_str(u16 status)
{
return "I/O Error";
}
-static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
+static inline const char *nvme_get_opcode_str(u8 opcode)
{
return "I/O Cmd";
}
-static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+static inline const char *nvme_get_admin_opcode_str(u8 opcode)
{
return "Admin Cmd";
}
-static inline const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode)
+static inline const char *nvme_get_fabrics_opcode_str(u8 opcode)
{
return "Fabrics Cmd";
}
#endif /* CONFIG_NVME_VERBOSE_ERRORS */
-static inline const unsigned char *nvme_opcode_str(int qid, u8 opcode, u8 fctype)
+static inline const char *nvme_opcode_str(int qid, u8 opcode)
{
- if (opcode == nvme_fabrics_command)
- return nvme_get_fabrics_opcode_str(fctype);
return qid ? nvme_get_opcode_str(opcode) :
nvme_get_admin_opcode_str(opcode);
}
+
+static inline const char *nvme_fabrics_opcode_str(
+ int qid, const struct nvme_command *cmd)
+{
+ if (nvme_is_fabrics(cmd))
+ return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype);
+
+ return nvme_opcode_str(qid, cmd->common.opcode);
+}
#endif /* _NVME_H */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 2f57da12d983..7990c3f22ecf 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -778,7 +778,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
struct bio_vec bv = req_bvec(req);
if (!is_pci_p2pdma_page(bv.bv_page)) {
- if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
+ if ((bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) +
+ bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
return nvme_setup_prp_simple(dev, req,
&cmnd->rw, &bv);
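
A worked example of the corrected test, assuming NVME_CTRL_PAGE_SIZE is 4096: with large folios, bv_offset can exceed one controller page, and only the offset within the page decides whether the simple two-PRP setup fits. The values below are hypothetical:

#include <assert.h>

#define CTRL_PAGE_SIZE 4096

int main(void)
{
        unsigned bv_offset = 4608;      /* 4096 + 512 into a large folio */
        unsigned bv_len = 7000;

        /* Old test: 4608 + 7000 > 8192, wrongly took the slow path. */
        assert(bv_offset + bv_len > CTRL_PAGE_SIZE * 2);
        /* New test masks to the in-page offset: 512 + 7000 <= 8192. */
        assert((bv_offset & (CTRL_PAGE_SIZE - 1)) + bv_len
               <= CTRL_PAGE_SIZE * 2);
        return 0;
}
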
@@ -825,9 +826,9 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
struct nvme_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct bio_vec bv = rq_integrity_vec(req);
- iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
- rq_dma_dir(req), 0);
+ iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0);
if (dma_mapping_error(dev->dev, iod->meta_dma))
return BLK_STS_IOERR;
cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
@@ -862,7 +863,8 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
nvme_start_request(req);
return BLK_STS_OK;
out_unmap_data:
- nvme_unmap_data(dev, req);
+ if (blk_rq_nr_phys_segments(req))
+ nvme_unmap_data(dev, req);
out_free_cmd:
nvme_cleanup_cmd(req);
return ret;
@@ -924,7 +926,6 @@ static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
return false;
- req->mq_hctx->tags->rqs[req->tag] = req;
return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
}
@@ -967,7 +968,7 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req)
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
dma_unmap_page(dev->dev, iod->meta_dma,
- rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
+ rq_integrity_vec(req).bv_len, rq_dma_dir(req));
}
if (blk_rq_nr_phys_segments(req))
@@ -1143,6 +1144,41 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
spin_unlock(&nvmeq->sq_lock);
}
+static int nvme_pci_subsystem_reset(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
+ int ret = 0;
+
+ /*
+ * Taking the shutdown_lock ensures the BAR mapping is not being
+ * altered by reset_work. Holding this lock before the RESETTING state
+ * change, if successful, also ensures nvme_remove won't be able to
+ * proceed to iounmap until we're done.
+ */
+ mutex_lock(&dev->shutdown_lock);
+ if (!dev->bar_mapped_size) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ writel(NVME_SUBSYS_RESET, dev->bar + NVME_REG_NSSR);
+ nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE);
+
+ /*
+ * Read controller status to flush the previous write and trigger a
+ * PCIe read error.
+ */
+ readl(dev->bar + NVME_REG_CSTS);
+unlock:
+ mutex_unlock(&dev->shutdown_lock);
+ return ret;
+}
+
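
The NVME_SUBSYS_RESET value written to NSSR above is the same 0x4E564D65 magic the removed generic helper used: it spells "NVMe" in ASCII (0x4E = 'N', 0x56 = 'V', 0x4D = 'M', 0x65 = 'e'), the value required to arm a subsystem reset. A standalone check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        const uint32_t nssr_magic = 0x4E564D65;

        assert((nssr_magic >> 24) == 'N');              /* 0x4E */
        assert(((nssr_magic >> 16) & 0xff) == 'V');     /* 0x56 */
        assert(((nssr_magic >> 8) & 0xff) == 'M');      /* 0x4D */
        assert((nssr_magic & 0xff) == 'e');             /* 0x65 */
        return 0;
}
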
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
struct nvme_command c = { };
@@ -1234,7 +1270,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
/* If there is a reset/reinit ongoing, we shouldn't reset again. */
- switch (dev->ctrl.state) {
+ switch (nvme_ctrl_state(&dev->ctrl)) {
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
return false;
@@ -1274,7 +1310,7 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
dev_warn(dev->ctrl.device,
"Does your device have a faulty power saving mode enabled?\n");
dev_warn(dev->ctrl.device,
- "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");
+ "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off pcie_port_pm=off\" and report a bug\n");
}
static enum blk_eh_timer_return nvme_timeout(struct request *req)
@@ -1285,6 +1321,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
struct request *abort_req;
struct nvme_command cmd = { };
u32 csts = readl(dev->bar + NVME_REG_CSTS);
+ u8 opcode;
+
+ if (nvme_state_terminal(&dev->ctrl))
+ goto disable;
/* If PCI error recovery process is happening, we cannot reset or
* the recovery mechanism will surely fail.
@@ -1311,8 +1351,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
dev_warn(dev->ctrl.device,
- "I/O %d QID %d timeout, completion polled\n",
- req->tag, nvmeq->qid);
+ "I/O tag %d (%04x) QID %d timeout, completion polled\n",
+ req->tag, nvme_cid(req), nvmeq->qid);
return BLK_EH_DONE;
}
@@ -1322,14 +1362,14 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
* cancellation error. All outstanding requests are completed on
* shutdown, so we return BLK_EH_DONE.
*/
- switch (dev->ctrl.state) {
+ switch (nvme_ctrl_state(&dev->ctrl)) {
case NVME_CTRL_CONNECTING:
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
fallthrough;
case NVME_CTRL_DELETING:
dev_warn_ratelimited(dev->ctrl.device,
- "I/O %d QID %d timeout, disable controller\n",
- req->tag, nvmeq->qid);
+ "I/O tag %d (%04x) QID %d timeout, disable controller\n",
+ req->tag, nvme_cid(req), nvmeq->qid);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
nvme_dev_disable(dev, true);
return BLK_EH_DONE;
@@ -1344,10 +1384,12 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
* command was already aborted once before and still hasn't been
* returned to the driver, or if this is the admin queue.
*/
+ opcode = nvme_req(req)->cmd->common.opcode;
if (!nvmeq->qid || iod->aborted) {
dev_warn(dev->ctrl.device,
- "I/O %d QID %d timeout, reset controller\n",
- req->tag, nvmeq->qid);
+ "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n",
+ req->tag, nvme_cid(req), opcode,
+ nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
goto disable;
}
@@ -1363,10 +1405,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
dev_warn(nvmeq->dev->ctrl.device,
- "I/O %d (%s) QID %d timeout, aborting\n",
- req->tag,
- nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode),
- nvmeq->qid);
+ "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, aborting req_op:%s(%u) size:%u\n",
+ req->tag, nvme_cid(req), opcode, nvme_get_opcode_str(opcode),
+ nvmeq->qid, blk_op_str(req_op(req)), req_op(req),
+ blk_rq_bytes(req));
abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
BLK_MQ_REQ_NOWAIT);
@@ -1388,8 +1430,11 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
return BLK_EH_RESET_TIMER;
disable:
- if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
+ if (nvme_state_terminal(&dev->ctrl))
+ nvme_dev_disable(dev, true);
return BLK_EH_DONE;
+ }
nvme_dev_disable(dev, false);
if (nvme_try_sched_reset(&dev->ctrl))
@@ -1594,7 +1639,7 @@ static int nvme_setup_io_queues_trylock(struct nvme_dev *dev)
/*
* Controller is in wrong state, fail early.
*/
- if (dev->ctrl.state != NVME_CTRL_CONNECTING) {
+ if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_CONNECTING) {
mutex_unlock(&dev->shutdown_lock);
return -ENODEV;
}
@@ -2216,6 +2261,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
.priv = dev,
};
unsigned int irq_queues, poll_queues;
+ unsigned int flags = PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY;
/*
* Poll queues don't need interrupts, but we need at least one I/O queue
@@ -2239,8 +2285,10 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
irq_queues = 1;
if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
irq_queues += (nr_io_queues - poll_queues);
- return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
- PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+ if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
+ flags &= ~PCI_IRQ_MSI;
+ return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, flags,
+ &affd);
}
static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
@@ -2460,6 +2508,12 @@ static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev)
static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
{
+ if (!dev->ctrl.tagset) {
+ nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
+ nvme_pci_nr_maps(dev), sizeof(struct nvme_iod));
+ return;
+ }
+
blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
/* free previously allocated queues that are no longer usable */
nvme_free_queues(dev, dev->online_queues);
@@ -2469,6 +2523,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
{
int result = -ENOMEM;
struct pci_dev *pdev = to_pci_dev(dev->dev);
+ unsigned int flags = PCI_IRQ_ALL_TYPES;
if (pci_enable_device_mem(pdev))
return result;
@@ -2485,7 +2540,9 @@ static int nvme_pci_enable(struct nvme_dev *dev)
* interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
* adjust this later.
*/
- result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
+ flags &= ~PCI_IRQ_MSI;
+ result = pci_alloc_irq_vectors(pdev, 1, 1, flags);
if (result < 0)
goto disable;
@@ -2506,15 +2563,8 @@ static int nvme_pci_enable(struct nvme_dev *dev)
else
dev->io_sqes = NVME_NVM_IOSQES;
- /*
- * Temporary fix for the Apple controller found in the MacBook8,1 and
- * some MacBook7,1 to avoid controller resets and data loss.
- */
- if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
+ if (dev->ctrl.quirks & NVME_QUIRK_QDEPTH_ONE) {
dev->q_depth = 2;
- dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
- "set queue depth=%u to work around controller resets\n",
- dev->q_depth);
} else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG &&
(pdev->device == 0xa821 || pdev->device == 0xa822) &&
NVME_CAP_MQES(dev->ctrl.cap) == 0) {
@@ -2574,13 +2624,13 @@ static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev)
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
+ enum nvme_ctrl_state state = nvme_ctrl_state(&dev->ctrl);
struct pci_dev *pdev = to_pci_dev(dev->dev);
bool dead;
mutex_lock(&dev->shutdown_lock);
dead = nvme_pci_ctrl_is_dead(dev);
- if (dev->ctrl.state == NVME_CTRL_LIVE ||
- dev->ctrl.state == NVME_CTRL_RESETTING) {
+ if (state == NVME_CTRL_LIVE || state == NVME_CTRL_RESETTING) {
if (pci_is_enabled(pdev))
nvme_start_freeze(&dev->ctrl);
/*
@@ -2691,7 +2741,7 @@ static void nvme_reset_work(struct work_struct *work)
bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
int result;
- if (dev->ctrl.state != NVME_CTRL_RESETTING) {
+ if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_RESETTING) {
dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
dev->ctrl.state);
result = -ENODEV;
@@ -2744,10 +2794,10 @@ static void nvme_reset_work(struct work_struct *work)
* controller around but remove all namespaces.
*/
if (dev->online_queues > 1) {
+ nvme_dbbuf_set(dev);
nvme_unquiesce_io_queues(&dev->ctrl);
nvme_wait_freeze(&dev->ctrl);
nvme_pci_update_nr_queues(dev);
- nvme_dbbuf_set(dev);
nvme_unfreeze(&dev->ctrl);
} else {
dev_warn(dev->ctrl.device, "IO queues lost\n");
@@ -2844,6 +2894,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.reg_read64 = nvme_pci_reg_read64,
.free_ctrl = nvme_pci_free_ctrl,
.submit_async_event = nvme_pci_submit_async_event,
+ .subsystem_reset = nvme_pci_subsystem_reset,
.get_address = nvme_pci_get_address,
.print_device_info = nvme_pci_print_device_info,
.supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma,
@@ -2903,8 +2954,38 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) &&
dmi_match(DMI_BOARD_NAME, "LNVNB161216"))
return NVME_QUIRK_SIMPLE_SUSPEND;
+ } else if (pdev->vendor == 0x2646 && (pdev->device == 0x2263 ||
+ pdev->device == 0x500f)) {
+ /*
+ * Exclude some Kingston NV1 and A2000 devices from
+ * NVME_QUIRK_SIMPLE_SUSPEND. Do a full suspend to save a
+ * lot of energy with s2idle sleep on some TUXEDO platforms.
+ */
+ if (dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
+ dmi_match(DMI_BOARD_NAME, "NS5x_7xAU") ||
+ dmi_match(DMI_BOARD_NAME, "NS5x_7xPU") ||
+ dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1"))
+ return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
+ } else if (pdev->vendor == 0x144d && pdev->device == 0xa80d) {
+ /*
+ * Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND
+ * because of high power consumption (> 2 Watt) in s2idle
+ * sleep. Only some boards with Intel CPU are affected.
+ */
+ if (dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
+ dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
+ dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
+ dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
+ return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
}
+ /*
+ * NVMe SSD drops off the PCIe bus after system idle
+ * for 10 hours on a Lenovo N60z board.
+ */
+ if (dmi_match(DMI_BOARD_NAME, "LXKT-ZXEG-N6"))
+ return NVME_QUIRK_NO_APST;
+
return 0;
}
@@ -2916,9 +2997,6 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
struct nvme_dev *dev;
int ret = -ENOMEM;
- if (node == NUMA_NO_NODE)
- set_dev_node(&pdev->dev, first_memory_node);
-
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)
return ERR_PTR(-ENOMEM);
@@ -2936,7 +3014,9 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
dev->dev = get_device(&pdev->dev);
quirks |= check_vendor_combination_bug(pdev);
- if (!noacpi && acpi_storage_d3(&pdev->dev)) {
+ if (!noacpi &&
+ !(quirks & NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND) &&
+ acpi_storage_d3(&pdev->dev)) {
/*
* Some systems use a bios work around to ask for D3 on
* platforms that support kernel managed suspend.
@@ -2989,6 +3069,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (IS_ERR(dev))
return PTR_ERR(dev);
+ result = nvme_add_ctrl(&dev->ctrl);
+ if (result)
+ goto out_put_ctrl;
+
result = nvme_dev_map(dev);
if (result)
goto out_uninit_ctrl;
@@ -3075,6 +3159,7 @@ out_dev_unmap:
nvme_dev_unmap(dev);
out_uninit_ctrl:
nvme_uninit_ctrl(&dev->ctrl);
+out_put_ctrl:
nvme_put_ctrl(&dev->ctrl);
return result;
}
@@ -3196,7 +3281,7 @@ static int nvme_suspend(struct device *dev)
nvme_wait_freeze(ctrl);
nvme_sync_queues(ctrl);
- if (ctrl->state != NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
goto unfreeze;
/*
@@ -3332,7 +3417,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES |
- NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ NVME_QUIRK_IGNORE_DEV_SUBNQN |
+ NVME_QUIRK_BOGUS_NID, },
{ PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
@@ -3349,6 +3435,11 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_BOGUS_NID, },
{ PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
+ .driver_data = NVME_QUIRK_QDEPTH_ONE },
+ { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
NVME_QUIRK_BOGUS_NID, },
@@ -3367,6 +3458,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
NVME_QUIRK_DISABLE_WRITE_ZEROES|
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x15b7, 0x5008), /* Sandisk SN530 */
+ .driver_data = NVME_QUIRK_BROKEN_MSI },
{ PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
@@ -3397,6 +3490,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1c5c, 0x1D59), /* SK Hynix BC901 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
@@ -3476,7 +3571,12 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
- .driver_data = NVME_QUIRK_SINGLE_VECTOR },
+ /*
+ * Fix for the Apple controller found in the MacBook8,1 and
+ * some MacBook7,1 to avoid controller resets and data loss.
+ */
+ .driver_data = NVME_QUIRK_SINGLE_VECTOR |
+ NVME_QUIRK_QDEPTH_ONE },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
.driver_data = NVME_QUIRK_SINGLE_VECTOR |
@@ -3527,5 +3627,6 @@ static void __exit nvme_exit(void)
MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
+MODULE_DESCRIPTION("NVMe host PCIe transport driver");
module_init(nvme_init);
module_exit(nvme_exit);
diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
index 391b1465ebfd..7347ddf85f00 100644
--- a/drivers/nvme/host/pr.c
+++ b/drivers/nvme/host/pr.c
@@ -72,12 +72,12 @@ static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
return nvme_submit_sync_cmd(ns->queue, c, data, data_len);
}
-static int nvme_sc_to_pr_err(int nvme_sc)
+static int nvme_status_to_pr_err(int status)
{
- if (nvme_is_path_error(nvme_sc))
+ if (nvme_is_path_error(status))
return PR_STS_PATH_FAILED;
- switch (nvme_sc) {
+ switch (status & NVME_SCT_SC_MASK) {
case NVME_SC_SUCCESS:
return PR_STS_SUCCESS;
case NVME_SC_RESERVATION_CONFLICT:
@@ -97,8 +97,7 @@ static int nvme_sc_to_pr_err(int nvme_sc)
static int nvme_send_pr_command(struct block_device *bdev,
struct nvme_command *c, void *data, unsigned int data_len)
{
- if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
- bdev->bd_disk->fops == &nvme_ns_head_ops)
+ if (nvme_disk_is_ns_head(bdev->bd_disk))
return nvme_send_ns_head_pr_command(bdev, c, data, data_len);
return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data,
@@ -122,7 +121,7 @@ static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
if (ret < 0)
return ret;
- return nvme_sc_to_pr_err(ret);
+ return nvme_status_to_pr_err(ret);
}
static int nvme_pr_register(struct block_device *bdev, u64 old,
@@ -197,7 +196,7 @@ retry:
if (ret < 0)
return ret;
- return nvme_sc_to_pr_err(ret);
+ return nvme_status_to_pr_err(ret);
}
static int nvme_pr_read_keys(struct block_device *bdev,
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 337a624a537c..c8fd0e8f0237 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -638,6 +638,9 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
+ if (!test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
+ return;
+
mutex_lock(&queue->queue_lock);
if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
__nvme_rdma_stop_queue(queue);
@@ -979,16 +982,18 @@ free_ctrl:
kfree(ctrl);
}
-static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
+static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl,
+ int status)
{
+ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
/* If we are resetting/deleting then do nothing */
- if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
- WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
- ctrl->ctrl.state == NVME_CTRL_LIVE);
+ if (state != NVME_CTRL_CONNECTING) {
+ WARN_ON_ONCE(state == NVME_CTRL_NEW || state == NVME_CTRL_LIVE);
return;
}
- if (nvmf_should_reconnect(&ctrl->ctrl)) {
+ if (nvmf_should_reconnect(&ctrl->ctrl, status)) {
dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
ctrl->ctrl.opts->reconnect_delay);
queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
@@ -1002,6 +1007,7 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
{
int ret;
bool changed;
+ u16 max_queue_size;
ret = nvme_rdma_configure_admin_queue(ctrl, new);
if (ret)
@@ -1026,11 +1032,16 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
}
- if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) {
+ if (ctrl->ctrl.max_integrity_segments)
+ max_queue_size = NVME_RDMA_MAX_METADATA_QUEUE_SIZE;
+ else
+ max_queue_size = NVME_RDMA_MAX_QUEUE_SIZE;
+
+ if (ctrl->ctrl.sqsize + 1 > max_queue_size) {
dev_warn(ctrl->ctrl.device,
- "ctrl sqsize %u > max queue size %u, clamping down\n",
- ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE);
- ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1;
+ "ctrl sqsize %u > max queue size %u, clamping down\n",
+ ctrl->ctrl.sqsize + 1, max_queue_size);
+ ctrl->ctrl.sqsize = max_queue_size - 1;
}
if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
@@ -1056,8 +1067,10 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
* unless we're during creation of a new controller to
* avoid races with teardown flow.
*/
- WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
- ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
+ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
+ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DELETING_NOIO);
WARN_ON_ONCE(new);
ret = -EINVAL;
goto destroy_io;
@@ -1077,6 +1090,7 @@ destroy_io:
nvme_rdma_free_io_queues(ctrl);
}
destroy_admin:
+ nvme_stop_keep_alive(&ctrl->ctrl);
nvme_quiesce_admin_queue(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
@@ -1091,10 +1105,12 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
{
struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
struct nvme_rdma_ctrl, reconnect_work);
+ int ret;
++ctrl->ctrl.nr_reconnects;
- if (nvme_rdma_setup_ctrl(ctrl, false))
+ ret = nvme_rdma_setup_ctrl(ctrl, false);
+ if (ret)
goto requeue;
dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
@@ -1105,9 +1121,9 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
return;
requeue:
- dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
- ctrl->ctrl.nr_reconnects);
- nvme_rdma_reconnect_or_remove(ctrl);
+ dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d/%d\n",
+ ctrl->ctrl.nr_reconnects, ctrl->ctrl.opts->max_reconnects);
+ nvme_rdma_reconnect_or_remove(ctrl, ret);
}
static void nvme_rdma_error_recovery_work(struct work_struct *work)
@@ -1125,12 +1141,14 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure is ok if we started ctrl delete */
- WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
- ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
+ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
+ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DELETING_NOIO);
return;
}
- nvme_rdma_reconnect_or_remove(ctrl);
+ nvme_rdma_reconnect_or_remove(ctrl, 0);
}
static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
@@ -1158,7 +1176,7 @@ static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
struct nvme_rdma_queue *queue = wc->qp->qp_context;
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
- if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE)
dev_info(ctrl->ctrl.device,
"%s for CQE 0x%p failed with status %s (%d)\n",
op, wc->wr_cqe,
@@ -1345,8 +1363,8 @@ static void nvme_rdma_set_sig_domain(struct blk_integrity *bi,
if (control & NVME_RW_PRINFO_PRCHK_REF)
domain->sig.dif.ref_remap = true;
- domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
- domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
+ domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.lbat);
+ domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.lbatm);
domain->sig.dif.app_escape = true;
if (pi_type == NVME_NS_DPS_PI_TYPE3)
domain->sig.dif.ref_escape = true;
@@ -1401,6 +1419,8 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
struct nvme_ns *ns = rq->q->queuedata;
struct bio *bio = rq->bio;
struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+ u32 xfer_len;
int nr;
req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs);
@@ -1413,8 +1433,7 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
if (unlikely(nr))
goto mr_put;
- nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c,
- req->mr->sig_attrs, ns->pi_type);
+ nvme_rdma_set_sig_attrs(bi, c, req->mr->sig_attrs, ns->head->pi_type);
nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
@@ -1432,7 +1451,11 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
IB_ACCESS_REMOTE_WRITE;
sg->addr = cpu_to_le64(req->mr->iova);
- put_unaligned_le24(req->mr->length, sg->length);
+ xfer_len = req->mr->length;
+ /* Check if PI is added by the HW */
+ if (!pi_count)
+ xfer_len += (xfer_len >> bi->interval_exp) * ns->head->pi_size;
+ put_unaligned_le24(xfer_len, sg->length);
put_unaligned_le32(req->mr->rkey, sg->key);
sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
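
A worked example of the wire-length fixup above: when no PI segments were mapped (pi_count == 0), the hardware interleaves one PI tuple per protection interval, so the announced transfer length must grow accordingly. Hypothetical values, assuming 4096-byte intervals and 8-byte tuples:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t xfer_len = 64 * 1024;  /* 64 KiB of data */
        unsigned interval_exp = 12;     /* 4096-byte protection interval */
        unsigned pi_size = 8;           /* 8-byte PI tuple */

        xfer_len += (xfer_len >> interval_exp) * pi_size;
        assert(xfer_len == 64 * 1024 + 16 * 8); /* 16 intervals, +128 B */
        return 0;
}
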
@@ -1473,7 +1496,7 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
req->metadata_sgl->sg_table.sgl =
(struct scatterlist *)(req->metadata_sgl + 1);
ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table,
- blk_rq_count_integrity_sg(rq->q, rq->bio),
+ rq->nr_integrity_segments,
req->metadata_sgl->sg_table.sgl,
NVME_INLINE_METADATA_SG_CNT);
if (unlikely(ret)) {
@@ -1481,8 +1504,8 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
goto out_unmap_sg;
}
- req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
- rq->bio, req->metadata_sgl->sg_table.sgl);
+ req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq,
+ req->metadata_sgl->sg_table.sgl);
*pi_count = ib_dma_map_sg(ibdev,
req->metadata_sgl->sg_table.sgl,
req->metadata_sgl->nents,
@@ -1853,6 +1876,8 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
*/
priv.hrqsize = cpu_to_le16(queue->queue_size);
priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
+ /* cntlid should only be set when creating an I/O queue */
+ priv.cntlid = cpu_to_le16(ctrl->ctrl.cntlid);
}
ret = rdma_connect_locked(queue->cm_id, &param);
@@ -1937,11 +1962,15 @@ static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_queue *queue = req->queue;
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+ struct nvme_command *cmd = req->req.cmd;
+ int qid = nvme_rdma_queue_idx(queue);
- dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
- rq->tag, nvme_rdma_queue_idx(queue));
+ dev_warn(ctrl->ctrl.device,
+ "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout\n",
+ rq->tag, nvme_cid(rq), cmd->common.opcode,
+ nvme_fabrics_opcode_str(qid, cmd), qid);
- if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
+ if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) {
/*
* If we are resetting, connecting or deleting we should
* complete immediately because we may block controller
@@ -2008,7 +2037,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
queue->pi_support &&
(c->common.opcode == nvme_cmd_write ||
c->common.opcode == nvme_cmd_read) &&
- nvme_ns_has_pi(ns))
+ nvme_ns_has_pi(ns->head))
req->use_sig_mr = true;
else
req->use_sig_mr = false;
@@ -2145,6 +2174,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
{
struct nvme_rdma_ctrl *ctrl =
container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
+ int ret;
nvme_stop_ctrl(&ctrl->ctrl);
nvme_rdma_shutdown_ctrl(ctrl, false);
@@ -2155,14 +2185,15 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
return;
}
- if (nvme_rdma_setup_ctrl(ctrl, false))
+ ret = nvme_rdma_setup_ctrl(ctrl, false);
+ if (ret)
goto out_fail;
return;
out_fail:
++ctrl->ctrl.nr_reconnects;
- nvme_rdma_reconnect_or_remove(ctrl);
+ nvme_rdma_reconnect_or_remove(ctrl, ret);
}
static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
@@ -2172,6 +2203,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
+ .subsystem_reset = nvmf_subsystem_reset,
.free_ctrl = nvme_rdma_free_ctrl,
.submit_async_event = nvme_rdma_submit_async_event,
.delete_ctrl = nvme_rdma_delete_ctrl,
@@ -2208,12 +2240,11 @@ nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts)
return found;
}
-static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
+static struct nvme_rdma_ctrl *nvme_rdma_alloc_ctrl(struct device *dev,
struct nvmf_ctrl_options *opts)
{
struct nvme_rdma_ctrl *ctrl;
int ret;
- bool changed;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
@@ -2275,6 +2306,30 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
if (ret)
goto out_kfree_queues;
+ return ctrl;
+
+out_kfree_queues:
+ kfree(ctrl->queues);
+out_free_ctrl:
+ kfree(ctrl);
+ return ERR_PTR(ret);
+}
+
+static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvme_rdma_ctrl *ctrl;
+ bool changed;
+ int ret;
+
+ ctrl = nvme_rdma_alloc_ctrl(dev, opts);
+ if (IS_ERR(ctrl))
+ return ERR_CAST(ctrl);
+
+ ret = nvme_add_ctrl(&ctrl->ctrl);
+ if (ret)
+ goto out_put_ctrl;
+
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
WARN_ON_ONCE(!changed);
@@ -2282,8 +2337,8 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
if (ret)
goto out_uninit_ctrl;
- dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
- nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
+ dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs, hostnqn: %s\n",
+ nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);
mutex_lock(&nvme_rdma_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
@@ -2293,15 +2348,11 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
out_uninit_ctrl:
nvme_uninit_ctrl(&ctrl->ctrl);
+out_put_ctrl:
nvme_put_ctrl(&ctrl->ctrl);
if (ret > 0)
ret = -EIO;
return ERR_PTR(ret);
-out_kfree_queues:
- kfree(ctrl->queues);
-out_free_ctrl:
- kfree(ctrl);
- return ERR_PTR(ret);
}
static struct nvmf_transport_ops nvme_rdma_transport = {
@@ -2386,4 +2437,5 @@ static void __exit nvme_rdma_cleanup_module(void)
module_init(nvme_rdma_init_module);
module_exit(nvme_rdma_cleanup_module);
+MODULE_DESCRIPTION("NVMe host RDMA transport driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index 212e1b05d298..b68a9e5f1ea3 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -35,16 +35,71 @@ static ssize_t nvme_sysfs_rescan(struct device *dev,
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
+static ssize_t nvme_adm_passthru_err_log_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf,
+ ctrl->passthru_err_log_enabled ? "on\n" : "off\n");
+}
+
+static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ bool passthru_err_log_enabled;
+ int err;
+
+ err = kstrtobool(buf, &passthru_err_log_enabled);
+ if (err)
+ return -EINVAL;
+
+ ctrl->passthru_err_log_enabled = passthru_err_log_enabled;
+
+ return count;
+}
+
static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
struct gendisk *disk = dev_to_disk(dev);
- if (disk->fops == &nvme_bdev_ops)
- return nvme_get_ns_from_dev(dev)->head;
- else
+ if (nvme_disk_is_ns_head(disk))
return disk->private_data;
+ return nvme_get_ns_from_dev(dev)->head;
+}
+
+static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ns_head *head = dev_to_ns_head(dev);
+
+ return sysfs_emit(buf, head->passthru_err_log_enabled ? "on\n" : "off\n");
}
+static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ns_head *head = dev_to_ns_head(dev);
+ bool passthru_err_log_enabled;
+ int err;
+
+ err = kstrtobool(buf, &passthru_err_log_enabled);
+ if (err)
+ return -EINVAL;
+ head->passthru_err_log_enabled = passthru_err_log_enabled;
+
+ return count;
+}
+
+static struct device_attribute dev_attr_adm_passthru_err_log_enabled = \
+ __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
+ nvme_adm_passthru_err_log_enabled_show, nvme_adm_passthru_err_log_enabled_store);
+
+static struct device_attribute dev_attr_io_passthru_err_log_enabled = \
+ __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
+ nvme_io_passthru_err_log_enabled_show, nvme_io_passthru_err_log_enabled_store);
+
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -114,20 +169,101 @@ static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(nsid);
-static struct attribute *nvme_ns_id_attrs[] = {
+static ssize_t csi_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ids.csi);
+}
+static DEVICE_ATTR_RO(csi);
+
+static ssize_t metadata_bytes_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ms);
+}
+static DEVICE_ATTR_RO(metadata_bytes);
+
+static int ns_head_update_nuse(struct nvme_ns_head *head)
+{
+ struct nvme_id_ns *id;
+ struct nvme_ns *ns;
+ int srcu_idx, ret = -EWOULDBLOCK;
+
+ /* Avoid issuing commands too often by rate limiting the update */
+ if (!__ratelimit(&head->rs_nuse))
+ return 0;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ ns = nvme_find_path(head);
+ if (!ns)
+ goto out_unlock;
+
+ ret = nvme_identify_ns(ns->ctrl, head->ns_id, &id);
+ if (ret)
+ goto out_unlock;
+
+ head->nuse = le64_to_cpu(id->nuse);
+ kfree(id);
+
+out_unlock:
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+
+static int ns_update_nuse(struct nvme_ns *ns)
+{
+ struct nvme_id_ns *id;
+ int ret;
+
+ /* Avoid issuing commands too often by rate limiting the update. */
+ if (!__ratelimit(&ns->head->rs_nuse))
+ return 0;
+
+ ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, &id);
+ if (ret)
+ return ret;
+
+ ns->head->nuse = le64_to_cpu(id->nuse);
+ kfree(id);
+ return 0;
+}
+
+static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ns_head *head = dev_to_ns_head(dev);
+ struct gendisk *disk = dev_to_disk(dev);
+ int ret;
+
+ if (nvme_disk_is_ns_head(disk))
+ ret = ns_head_update_nuse(head);
+ else
+ ret = ns_update_nuse(disk->private_data);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%llu\n", head->nuse);
+}
+static DEVICE_ATTR_RO(nuse);
+
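
nuse_show() above refreshes the cached namespace-utilization value at most as often as the rs_nuse ratelimit allows, and otherwise reports the cached head->nuse. A minimal userspace sketch of that gate (names and interval are hypothetical):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct demo_ratelimit {
        time_t last;
        int interval_s;
};

static bool demo_ratelimit_try(struct demo_ratelimit *rl)
{
        time_t now = time(NULL);

        if (rl->last && now - rl->last < rl->interval_s)
                return false;   /* too soon: caller keeps cached value */
        rl->last = now;
        return true;
}

int main(void)
{
        struct demo_ratelimit rl = { .last = 0, .interval_s = 5 };

        printf("first:  %d\n", demo_ratelimit_try(&rl)); /* 1: refresh */
        printf("second: %d\n", demo_ratelimit_try(&rl)); /* 0: cached */
        return 0;
}
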
+static struct attribute *nvme_ns_attrs[] = {
&dev_attr_wwid.attr,
&dev_attr_uuid.attr,
&dev_attr_nguid.attr,
&dev_attr_eui.attr,
+ &dev_attr_csi.attr,
&dev_attr_nsid.attr,
+ &dev_attr_metadata_bytes.attr,
+ &dev_attr_nuse.attr,
#ifdef CONFIG_NVME_MULTIPATH
&dev_attr_ana_grpid.attr,
&dev_attr_ana_state.attr,
#endif
+ &dev_attr_io_passthru_err_log_enabled.attr,
NULL,
};
-static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
+static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -148,7 +284,8 @@ static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
}
#ifdef CONFIG_NVME_MULTIPATH
if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
- if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */
+ /* per-path attr */
+ if (nvme_disk_is_ns_head(dev_to_disk(dev)))
return 0;
if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
return 0;
@@ -157,13 +294,13 @@ static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
return a->mode;
}
-static const struct attribute_group nvme_ns_id_attr_group = {
- .attrs = nvme_ns_id_attrs,
- .is_visible = nvme_ns_id_attrs_are_visible,
+static const struct attribute_group nvme_ns_attr_group = {
+ .attrs = nvme_ns_attrs,
+ .is_visible = nvme_ns_attrs_are_visible,
};
-const struct attribute_group *nvme_ns_id_attr_groups[] = {
- &nvme_ns_id_attr_group,
+const struct attribute_group *nvme_ns_attr_groups[] = {
+ &nvme_ns_attr_group,
NULL,
};
@@ -226,6 +363,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
char *buf)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ unsigned state = (unsigned)nvme_ctrl_state(ctrl);
static const char *const state_name[] = {
[NVME_CTRL_NEW] = "new",
[NVME_CTRL_LIVE] = "live",
@@ -236,9 +374,8 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
[NVME_CTRL_DEAD] = "dead",
};
- if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
- state_name[ctrl->state])
- return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);
+ if (state < ARRAY_SIZE(state_name) && state_name[state])
+ return sysfs_emit(buf, "%s\n", state_name[state]);
return sysfs_emit(buf, "unknown state\n");
}
@@ -409,7 +546,7 @@ static ssize_t dctype_show(struct device *dev,
}
static DEVICE_ATTR_RO(dctype);
-#ifdef CONFIG_NVME_AUTH
+#ifdef CONFIG_NVME_HOST_AUTH
static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -550,10 +687,11 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_kato.attr,
&dev_attr_cntrltype.attr,
&dev_attr_dctype.attr,
-#ifdef CONFIG_NVME_AUTH
+#ifdef CONFIG_NVME_HOST_AUTH
&dev_attr_dhchap_secret.attr,
&dev_attr_dhchap_ctrl_secret.attr,
#endif
+ &dev_attr_adm_passthru_err_log_enabled.attr,
NULL
};
@@ -577,7 +715,7 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
return 0;
if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
return 0;
-#ifdef CONFIG_NVME_AUTH
+#ifdef CONFIG_NVME_HOST_AUTH
if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
return 0;
if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
@@ -593,8 +731,78 @@ const struct attribute_group nvme_dev_attrs_group = {
};
EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);
+#ifdef CONFIG_NVME_TCP_TLS
+static ssize_t tls_key_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (!ctrl->tls_pskid)
+ return 0;
+ return sysfs_emit(buf, "%08x\n", ctrl->tls_pskid);
+}
+static DEVICE_ATTR_RO(tls_key);
+
+static ssize_t tls_configured_key_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct key *key = ctrl->opts->tls_key;
+
+ return sysfs_emit(buf, "%08x\n", key_serial(key));
+}
+static DEVICE_ATTR_RO(tls_configured_key);
+
+static ssize_t tls_keyring_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct key *keyring = ctrl->opts->keyring;
+
+ return sysfs_emit(buf, "%s\n", keyring->description);
+}
+static DEVICE_ATTR_RO(tls_keyring);
+
+static struct attribute *nvme_tls_attrs[] = {
+ &dev_attr_tls_key.attr,
+ &dev_attr_tls_configured_key.attr,
+ &dev_attr_tls_keyring.attr,
+ NULL,
+};
+
+static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (!ctrl->opts || strcmp(ctrl->opts->transport, "tcp"))
+ return 0;
+
+ if (a == &dev_attr_tls_key.attr &&
+ !ctrl->opts->tls)
+ return 0;
+ if (a == &dev_attr_tls_configured_key.attr &&
+ !ctrl->opts->tls_key)
+ return 0;
+ if (a == &dev_attr_tls_keyring.attr &&
+ !ctrl->opts->keyring)
+ return 0;
+
+ return a->mode;
+}
+
+const struct attribute_group nvme_tls_attrs_group = {
+ .attrs = nvme_tls_attrs,
+ .is_visible = nvme_tls_attrs_are_visible,
+};
+#endif
+
const struct attribute_group *nvme_dev_attr_groups[] = {
&nvme_dev_attrs_group,
+#ifdef CONFIG_NVME_TCP_TLS
+ &nvme_tls_attrs_group,
+#endif
NULL,
};
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 5b332d9f87fc..89c44413c593 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -8,9 +8,14 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/key.h>
#include <linux/nvme-tcp.h>
+#include <linux/nvme-keyring.h>
#include <net/sock.h>
#include <net/tcp.h>
+#include <net/tls.h>
+#include <net/tls_prot.h>
+#include <net/handshake.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>
@@ -31,6 +36,24 @@ static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
+/*
+ * Use an unbound workqueue for nvme_tcp_wq so that the CPU affinity
+ * can be set from sysfs.
+ */
+static bool wq_unbound;
+module_param(wq_unbound, bool, 0644);
+MODULE_PARM_DESC(wq_unbound, "Use unbound workqueue for nvme-tcp IO context (default false)");
+
+/*
+ * TLS handshake timeout
+ */
+static int tls_handshake_timeout = 10;
+#ifdef CONFIG_NVME_TCP_TLS
+module_param(tls_handshake_timeout, int, 0644);
+MODULE_PARM_DESC(tls_handshake_timeout,
+ "nvme TLS handshake timeout in seconds (default 10)");
+#endif
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* lockdep can detect a circular dependency of the form
* sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
@@ -142,11 +165,13 @@ struct nvme_tcp_queue {
bool hdr_digest;
bool data_digest;
+ bool tls_enabled;
struct ahash_request *rcv_hash;
struct ahash_request *snd_hash;
__le32 exp_ddgst;
__le32 recv_ddgst;
-
+ struct completion tls_complete;
+ int tls_err;
struct page_frag_cache pf_cache;
void (*state_change)(struct sock *);
@@ -189,6 +214,28 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
return queue - queue->ctrl->queues;
}
+/*
+ * Check if the queue is TLS encrypted
+ */
+static inline bool nvme_tcp_queue_tls(struct nvme_tcp_queue *queue)
+{
+ if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
+ return false;
+
+ return queue->tls_enabled;
+}
+
+/*
+ * Check if TLS is configured for the controller.
+ */
+static inline bool nvme_tcp_tls_configured(struct nvme_ctrl *ctrl)
+{
+ if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
+ return false;
+
+ return ctrl->opts->tls;
+}
+
static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
u32 queue_idx = nvme_tcp_queue_id(queue);
@@ -328,12 +375,18 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
} while (ret > 0);
}
-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+static inline bool nvme_tcp_queue_has_pending(struct nvme_tcp_queue *queue)
{
return !list_empty(&queue->send_list) ||
!llist_empty(&queue->req_list);
}
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+ return !nvme_tcp_queue_tls(queue) &&
+ nvme_tcp_queue_has_pending(queue);
+}
+
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
bool sync, bool last)
{
@@ -354,7 +407,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
mutex_unlock(&queue->send_mutex);
}
- if (last && nvme_tcp_queue_more(queue))
+ if (last && nvme_tcp_queue_has_pending(queue))
queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
@@ -1013,7 +1066,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
else
msg.msg_flags |= MSG_MORE;
- if (!sendpage_ok(page))
+ if (!sendpages_ok(page, len, offset))
msg.msg_flags &= ~MSG_SPLICE_PAGES;
bvec_set_page(&bvec, page, len, offset);
@@ -1320,7 +1373,6 @@ static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
- struct page *page;
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
unsigned int noreclaim_flag;
@@ -1331,14 +1383,12 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
if (queue->hdr_digest || queue->data_digest)
nvme_tcp_free_crypto(queue);
- if (queue->pf_cache.va) {
- page = virt_to_head_page(queue->pf_cache.va);
- __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
- queue->pf_cache.va = NULL;
- }
+ page_frag_cache_drain(&queue->pf_cache);
noreclaim_flag = memalloc_noreclaim_save();
- sock_release(queue->sock);
+ /* ->sock will be released by fput() */
+ fput(queue->sock->file);
+ queue->sock = NULL;
memalloc_noreclaim_restore(noreclaim_flag);
kfree(queue->pdu);
@@ -1350,6 +1400,8 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
{
struct nvme_tcp_icreq_pdu *icreq;
struct nvme_tcp_icresp_pdu *icresp;
+ char cbuf[CMSG_LEN(sizeof(char))] = {};
+ u8 ctype;
struct msghdr msg = {};
struct kvec iov;
bool ctrl_hdgst, ctrl_ddgst;
@@ -1381,17 +1433,36 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
iov.iov_base = icreq;
iov.iov_len = sizeof(*icreq);
ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
- if (ret < 0)
+ if (ret < 0) {
+ pr_warn("queue %d: failed to send icreq, error %d\n",
+ nvme_tcp_queue_id(queue), ret);
goto free_icresp;
+ }
memset(&msg, 0, sizeof(msg));
iov.iov_base = icresp;
iov.iov_len = sizeof(*icresp);
+ if (nvme_tcp_queue_tls(queue)) {
+ msg.msg_control = cbuf;
+ msg.msg_controllen = sizeof(cbuf);
+ }
ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
iov.iov_len, msg.msg_flags);
- if (ret < 0)
+ if (ret < 0) {
+ pr_warn("queue %d: failed to receive icresp, error %d\n",
+ nvme_tcp_queue_id(queue), ret);
goto free_icresp;
-
+ }
+ ret = -ENOTCONN;
+ if (nvme_tcp_queue_tls(queue)) {
+ ctype = tls_get_record_type(queue->sock->sk,
+ (struct cmsghdr *)cbuf);
+ if (ctype != TLS_RECORD_TYPE_DATA) {
+ pr_err("queue %d: unhandled TLS record %d\n",
+ nvme_tcp_queue_id(queue), ctype);
+ goto free_icresp;
+ }
+ }
ret = -EINVAL;
if (icresp->hdr.type != nvme_tcp_icresp) {
pr_err("queue %d: bad type returned %d\n",
@@ -1504,14 +1575,99 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
else if (nvme_tcp_poll_queue(queue))
n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
ctrl->io_queues[HCTX_TYPE_READ] - 1;
- queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+ if (wq_unbound)
+ queue->io_cpu = WORK_CPU_UNBOUND;
+ else
+ queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+}
+
+static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
+{
+ struct nvme_tcp_queue *queue = data;
+ struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+ int qid = nvme_tcp_queue_id(queue);
+ struct key *tls_key;
+
+ dev_dbg(ctrl->ctrl.device, "queue %d: TLS handshake done, key %x, status %d\n",
+ qid, pskid, status);
+
+ if (status) {
+ queue->tls_err = -status;
+ goto out_complete;
+ }
+
+ tls_key = nvme_tls_key_lookup(pskid);
+ if (IS_ERR(tls_key)) {
+ dev_warn(ctrl->ctrl.device, "queue %d: Invalid key %x\n",
+ qid, pskid);
+ queue->tls_err = -ENOKEY;
+ } else {
+ queue->tls_enabled = true;
+ if (qid == 0)
+ ctrl->ctrl.tls_pskid = key_serial(tls_key);
+ key_put(tls_key);
+ queue->tls_err = 0;
+ }
+
+out_complete:
+ complete(&queue->tls_complete);
}
-static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
+static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl,
+ struct nvme_tcp_queue *queue,
+ key_serial_t pskid)
+{
+ int qid = nvme_tcp_queue_id(queue);
+ int ret;
+ struct tls_handshake_args args;
+ unsigned long tmo = tls_handshake_timeout * HZ;
+ key_serial_t keyring = nvme_keyring_id();
+
+ dev_dbg(nctrl->device, "queue %d: start TLS with key %x\n",
+ qid, pskid);
+ memset(&args, 0, sizeof(args));
+ args.ta_sock = queue->sock;
+ args.ta_done = nvme_tcp_tls_done;
+ args.ta_data = queue;
+ args.ta_my_peerids[0] = pskid;
+ args.ta_num_peerids = 1;
+ if (nctrl->opts->keyring)
+ keyring = key_serial(nctrl->opts->keyring);
+ args.ta_keyring = keyring;
+ args.ta_timeout_ms = tls_handshake_timeout * 1000;
+ queue->tls_err = -EOPNOTSUPP;
+ init_completion(&queue->tls_complete);
+ ret = tls_client_hello_psk(&args, GFP_KERNEL);
+ if (ret) {
+ dev_err(nctrl->device, "queue %d: failed to start TLS: %d\n",
+ qid, ret);
+ return ret;
+ }
+ ret = wait_for_completion_interruptible_timeout(&queue->tls_complete, tmo);
+ if (ret <= 0) {
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+
+ dev_err(nctrl->device,
+ "queue %d: TLS handshake failed, error %d\n",
+ qid, ret);
+ tls_handshake_cancel(queue->sock->sk);
+ } else {
+ dev_dbg(nctrl->device,
+ "queue %d: TLS handshake complete, error %d\n",
+ qid, queue->tls_err);
+ ret = queue->tls_err;
+ }
+ return ret;
+}
+
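
The ret <= 0 handling above folds two different outcomes of wait_for_completion_interruptible_timeout() into the cancel path. A short sketch of that kernel return convention (illustrative variable names, not part of this patch):

	long t = wait_for_completion_interruptible_timeout(&done, tmo);
	if (t == 0)
		err = -ETIMEDOUT;	/* timed out: handshake daemon never answered */
	else if (t < 0)
		err = t;		/* -ERESTARTSYS: interrupted by a signal */
	else
		err = result;		/* completed: use the status the callback stored */
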
+static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
+ key_serial_t pskid)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
int ret, rcv_pdu_size;
+ struct file *sock_file;
mutex_init(&queue->queue_lock);
queue->ctrl = ctrl;
@@ -1534,6 +1690,11 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
goto err_destroy_mutex;
}
+ sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL);
+ if (IS_ERR(sock_file)) {
+ ret = PTR_ERR(sock_file);
+ goto err_destroy_mutex;
+ }
nvme_tcp_reclassify_socket(queue->sock);
/* Single syn retry */
@@ -1624,6 +1785,13 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
goto err_rcv_pdu;
}
+	/* If PSKs are configured, try to start TLS */
+ if (nvme_tcp_tls_configured(nctrl) && pskid) {
+ ret = nvme_tcp_start_tls(nctrl, queue, pskid);
+ if (ret)
+ goto err_init_connect;
+ }
+
ret = nvme_tcp_init_connection(queue);
if (ret)
goto err_init_connect;
@@ -1640,7 +1808,8 @@ err_crypto:
if (queue->hdr_digest || queue->data_digest)
nvme_tcp_free_crypto(queue);
err_sock:
- sock_release(queue->sock);
+ /* ->sock will be released by fput() */
+ fput(queue->sock->file);
queue->sock = NULL;
err_destroy_mutex:
mutex_destroy(&queue->send_mutex);
@@ -1678,6 +1847,8 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
mutex_lock(&queue->queue_lock);
if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
__nvme_tcp_stop_queue(queue);
+ /* Stopping the queue will disable TLS */
+ queue->tls_enabled = false;
mutex_unlock(&queue->queue_lock);
}
@@ -1772,8 +1943,23 @@ out_stop_queues:
static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
{
int ret;
+ key_serial_t pskid = 0;
+
+ if (nvme_tcp_tls_configured(ctrl)) {
+ if (ctrl->opts->tls_key)
+ pskid = key_serial(ctrl->opts->tls_key);
+ else {
+ pskid = nvme_tls_psk_default(ctrl->opts->keyring,
+ ctrl->opts->host->nqn,
+ ctrl->opts->subsysnqn);
+ if (!pskid) {
+ dev_err(ctrl->device, "no valid PSK found\n");
+ return -ENOKEY;
+ }
+ }
+ }
- ret = nvme_tcp_alloc_queue(ctrl, 0);
+ ret = nvme_tcp_alloc_queue(ctrl, 0, pskid);
if (ret)
return ret;
@@ -1792,8 +1978,14 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
int i, ret;
+ if (nvme_tcp_tls_configured(ctrl) && !ctrl->tls_pskid) {
+ dev_err(ctrl->device, "no PSK negotiated\n");
+ return -ENOKEY;
+ }
+
for (i = 1; i < ctrl->queue_count; i++) {
- ret = nvme_tcp_alloc_queue(ctrl, i);
+ ret = nvme_tcp_alloc_queue(ctrl, i,
+ ctrl->tls_pskid);
if (ret)
goto out_free_queues;
}
@@ -1974,6 +2166,11 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
if (remove)
nvme_unquiesce_admin_queue(ctrl);
nvme_tcp_destroy_admin_queue(ctrl, remove);
+ if (ctrl->tls_pskid) {
+ dev_dbg(ctrl->device, "Wipe negotiated TLS_PSK %08x\n",
+ ctrl->tls_pskid);
+ ctrl->tls_pskid = 0;
+ }
}
static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
@@ -1991,22 +2188,25 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
nvme_tcp_destroy_io_queues(ctrl, remove);
}
-static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
+static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl,
+ int status)
{
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
/* If we are resetting/deleting then do nothing */
- if (ctrl->state != NVME_CTRL_CONNECTING) {
- WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
- ctrl->state == NVME_CTRL_LIVE);
+ if (state != NVME_CTRL_CONNECTING) {
+ WARN_ON_ONCE(state == NVME_CTRL_NEW || state == NVME_CTRL_LIVE);
return;
}
- if (nvmf_should_reconnect(ctrl)) {
+ if (nvmf_should_reconnect(ctrl, status)) {
dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
ctrl->opts->reconnect_delay);
queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
ctrl->opts->reconnect_delay * HZ);
} else {
- dev_info(ctrl->device, "Removing controller...\n");
+ dev_info(ctrl->device, "Removing controller (%d)...\n",
+ status);
nvme_delete_ctrl(ctrl);
}
}
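
nvmf_should_reconnect() now receives the setup status so that permanent failures can stop the retry loop early. Its body is outside this diff; a plausible sketch of the decision, with the exact checks being an assumption:

	static bool should_reconnect(struct nvme_ctrl *ctrl, int status)
	{
		/* a positive NVMe status with DNR set means "do not retry" */
		if (status > 0 && (status & NVME_STATUS_DNR))
			return false;
		/* otherwise retry until the attempt budget is exhausted */
		return ctrl->opts->max_reconnects == -1 ||
		       ctrl->nr_reconnects < ctrl->opts->max_reconnects;
	}
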
@@ -2056,8 +2256,10 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
* unless we're during creation of a new controller to
* avoid races with teardown flow.
*/
- WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
- ctrl->state != NVME_CTRL_DELETING_NOIO);
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DELETING_NOIO);
WARN_ON_ONCE(new);
ret = -EINVAL;
goto destroy_io;
@@ -2075,11 +2277,8 @@ destroy_io:
nvme_tcp_destroy_io_queues(ctrl, new);
}
destroy_admin:
- nvme_quiesce_admin_queue(ctrl);
- blk_sync_queue(ctrl->admin_q);
- nvme_tcp_stop_queue(ctrl, 0);
- nvme_cancel_admin_tagset(ctrl);
- nvme_tcp_destroy_admin_queue(ctrl, new);
+ nvme_stop_keep_alive(ctrl);
+ nvme_tcp_teardown_admin_queue(ctrl, false);
return ret;
}
@@ -2088,23 +2287,25 @@ static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
struct nvme_tcp_ctrl, connect_work);
struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
+ int ret;
++ctrl->nr_reconnects;
- if (nvme_tcp_setup_ctrl(ctrl, false))
+ ret = nvme_tcp_setup_ctrl(ctrl, false);
+ if (ret)
goto requeue;
- dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
- ctrl->nr_reconnects);
+ dev_info(ctrl->device, "Successfully reconnected (attempt %d/%d)\n",
+ ctrl->nr_reconnects, ctrl->opts->max_reconnects);
ctrl->nr_reconnects = 0;
return;
requeue:
- dev_info(ctrl->device, "Failed reconnect attempt %d\n",
- ctrl->nr_reconnects);
- nvme_tcp_reconnect_or_remove(ctrl);
+ dev_info(ctrl->device, "Failed reconnect attempt %d/%d\n",
+ ctrl->nr_reconnects, ctrl->opts->max_reconnects);
+ nvme_tcp_reconnect_or_remove(ctrl, ret);
}
static void nvme_tcp_error_recovery_work(struct work_struct *work)
@@ -2124,12 +2325,14 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure is ok if we started ctrl delete */
- WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
- ctrl->state != NVME_CTRL_DELETING_NOIO);
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DELETING_NOIO);
return;
}
- nvme_tcp_reconnect_or_remove(ctrl);
+ nvme_tcp_reconnect_or_remove(ctrl, 0);
}
static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
@@ -2149,25 +2352,29 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl =
container_of(work, struct nvme_ctrl, reset_work);
+ int ret;
nvme_stop_ctrl(ctrl);
nvme_tcp_teardown_ctrl(ctrl, false);
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure is ok if we started ctrl delete */
- WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
- ctrl->state != NVME_CTRL_DELETING_NOIO);
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DELETING_NOIO);
return;
}
- if (nvme_tcp_setup_ctrl(ctrl, false))
+ ret = nvme_tcp_setup_ctrl(ctrl, false);
+ if (ret)
goto out_fail;
return;
out_fail:
++ctrl->nr_reconnects;
- nvme_tcp_reconnect_or_remove(ctrl);
+ nvme_tcp_reconnect_or_remove(ctrl, ret);
}
static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
@@ -2266,15 +2473,15 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
- u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
+ struct nvme_command *cmd = &pdu->cmd;
int qid = nvme_tcp_queue_id(req->queue);
dev_warn(ctrl->device,
- "queue %d: timeout cid %#x type %d opcode %#x (%s)\n",
- nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type,
- opc, nvme_opcode_str(qid, opc, fctype));
+ "I/O tag %d (%04x) type %d opcode %#x (%s) QID %d timeout\n",
+ rq->tag, nvme_cid(rq), pdu->hdr.type, cmd->common.opcode,
+ nvme_fabrics_opcode_str(qid, cmd), qid);
- if (ctrl->state != NVME_CTRL_LIVE) {
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
/*
* If we are resetting, connecting or deleting we should
* complete immediately because we may block controller
@@ -2482,6 +2689,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
+ .subsystem_reset = nvmf_subsystem_reset,
.free_ctrl = nvme_tcp_free_ctrl,
.submit_async_event = nvme_tcp_submit_async_event,
.delete_ctrl = nvme_tcp_delete_ctrl,
@@ -2506,7 +2714,7 @@ nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
return found;
}
-static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
+static struct nvme_tcp_ctrl *nvme_tcp_alloc_ctrl(struct device *dev,
struct nvmf_ctrl_options *opts)
{
struct nvme_tcp_ctrl *ctrl;
@@ -2581,6 +2789,28 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
if (ret)
goto out_kfree_queues;
+ return ctrl;
+out_kfree_queues:
+ kfree(ctrl->queues);
+out_free_ctrl:
+ kfree(ctrl);
+ return ERR_PTR(ret);
+}
+
+static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvme_tcp_ctrl *ctrl;
+ int ret;
+
+ ctrl = nvme_tcp_alloc_ctrl(dev, opts);
+ if (IS_ERR(ctrl))
+ return ERR_CAST(ctrl);
+
+ ret = nvme_add_ctrl(&ctrl->ctrl);
+ if (ret)
+ goto out_put_ctrl;
+
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
WARN_ON_ONCE(1);
ret = -EINTR;
@@ -2591,8 +2821,8 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
if (ret)
goto out_uninit_ctrl;
- dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
- nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
+ dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp, hostnqn: %s\n",
+ nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);
mutex_lock(&nvme_tcp_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
@@ -2602,15 +2832,11 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
out_uninit_ctrl:
nvme_uninit_ctrl(&ctrl->ctrl);
+out_put_ctrl:
nvme_put_ctrl(&ctrl->ctrl);
if (ret > 0)
ret = -EIO;
return ERR_PTR(ret);
-out_kfree_queues:
- kfree(ctrl->queues);
-out_free_ctrl:
- kfree(ctrl);
- return ERR_PTR(ret);
}
static struct nvmf_transport_ops nvme_tcp_transport = {
@@ -2621,12 +2847,15 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
- NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
+ NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE | NVMF_OPT_TLS |
+ NVMF_OPT_KEYRING | NVMF_OPT_TLS_KEY,
.create_ctrl = nvme_tcp_create_ctrl,
};
static int __init nvme_tcp_init_module(void)
{
+ unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS;
+
BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
@@ -2636,8 +2865,10 @@ static int __init nvme_tcp_init_module(void)
BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);
- nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
- WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (wq_unbound)
+ wq_flags |= WQ_UNBOUND;
+
+ nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq", wq_flags, 0);
if (!nvme_tcp_wq)
return -ENOMEM;
@@ -2663,4 +2894,5 @@ static void __exit nvme_tcp_cleanup_module(void)
module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);
+MODULE_DESCRIPTION("NVMe host TCP transport driver");
MODULE_LICENSE("GPL v2");
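
The wq_unbound flag consulted in nvme_tcp_set_queue_io_cpu() and at workqueue allocation is a module parameter whose definition lies outside the visible hunks; its assumed shape:

	static bool wq_unbound;
	module_param(wq_unbound, bool, 0644);
	MODULE_PARM_DESC(wq_unbound,
		"Use an unbound workqueue for nvme-tcp I/O context (default false)");
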
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 1c36fcedea20..0288315f0050 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -119,7 +119,10 @@ static const char *nvme_trace_get_lba_status(struct trace_seq *p,
static const char *nvme_trace_admin_format_nvm(struct trace_seq *p, u8 *cdw10)
{
const char *ret = trace_seq_buffer_ptr(p);
- u8 lbaf = cdw10[0] & 0xF;
+ /*
+	 * lbafu (bits 13:12) is already in the upper 4 bits; lbafl: bits 03:00.
+ */
+ u8 lbaf = (cdw10[1] & 0x30) | (cdw10[0] & 0xF);
u8 mset = (cdw10[0] >> 4) & 0x1;
u8 pi = (cdw10[0] >> 5) & 0x7;
u8 pil = cdw10[1] & 0x1;
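
A worked example of the widened LBAF decode, using hypothetical CDW10 bytes: the low nibble of byte 0 carries LBAFL, and bits 5:4 of byte 1 carry LBAFU already shifted into place:

	u8 b0 = 0x25, b1 = 0x31;		/* hypothetical cdw10[0], cdw10[1] */
	u8 lbaf = (b1 & 0x30) | (b0 & 0xF);	/* 0x30 | 0x05 = 0x35, i.e. LBAF 53 */
	u8 pil  = b1 & 0x1;			/* 1 */
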
@@ -164,12 +167,27 @@ static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
{
+ static const char * const zsa_strs[] = {
+ [0x01] = "close zone",
+ [0x02] = "finish zone",
+ [0x03] = "open zone",
+ [0x04] = "reset zone",
+ [0x05] = "offline zone",
+ [0x10] = "set zone descriptor extension"
+ };
const char *ret = trace_seq_buffer_ptr(p);
u64 slba = get_unaligned_le64(cdw10);
+ const char *zsa_str;
u8 zsa = cdw10[12];
u8 all = cdw10[13];
- trace_seq_printf(p, "slba=%llu, zsa=%u, all=%u", slba, zsa, all);
+ if (zsa < ARRAY_SIZE(zsa_strs) && zsa_strs[zsa])
+ zsa_str = zsa_strs[zsa];
+ else
+ zsa_str = "reserved";
+
+ trace_seq_printf(p, "slba=%llu, zsa=%u:%s, all=%u",
+ slba, zsa, zsa_str, all);
trace_seq_putc(p, 0);
return ret;
@@ -177,15 +195,86 @@ static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
{
+ static const char * const zrasf_strs[] = {
+ [0x00] = "list all zones",
+ [0x01] = "list the zones in the ZSE: Empty state",
+ [0x02] = "list the zones in the ZSIO: Implicitly Opened state",
+ [0x03] = "list the zones in the ZSEO: Explicitly Opened state",
+ [0x04] = "list the zones in the ZSC: Closed state",
+ [0x05] = "list the zones in the ZSF: Full state",
+ [0x06] = "list the zones in the ZSRO: Read Only state",
+ [0x07] = "list the zones in the ZSO: Offline state",
+ [0x09] = "list the zones that have the zone attribute"
+ };
const char *ret = trace_seq_buffer_ptr(p);
u64 slba = get_unaligned_le64(cdw10);
u32 numd = get_unaligned_le32(cdw10 + 8);
u8 zra = cdw10[12];
u8 zrasf = cdw10[13];
+ const char *zrasf_str;
u8 pr = cdw10[14];
- trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u, pr=%u",
- slba, numd, zra, zrasf, pr);
+ if (zrasf < ARRAY_SIZE(zrasf_strs) && zrasf_strs[zrasf])
+ zrasf_str = zrasf_strs[zrasf];
+ else
+ zrasf_str = "reserved";
+
+ trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u:%s, pr=%u",
+ slba, numd, zra, zrasf, zrasf_str, pr);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_resv_reg(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 rrega = cdw10[0] & 0x7;
+ u8 iekey = (cdw10[0] >> 3) & 0x1;
+ u8 ptpl = (cdw10[3] >> 6) & 0x3;
+
+ trace_seq_printf(p, "rrega=%u, iekey=%u, ptpl=%u",
+ rrega, iekey, ptpl);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_resv_acq(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 racqa = cdw10[0] & 0x7;
+ u8 iekey = (cdw10[0] >> 3) & 0x1;
+ u8 rtype = cdw10[1];
+
+ trace_seq_printf(p, "racqa=%u, iekey=%u, rtype=%u",
+ racqa, iekey, rtype);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_resv_rel(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 rrela = cdw10[0] & 0x7;
+ u8 iekey = (cdw10[0] >> 3) & 0x1;
+ u8 rtype = cdw10[1];
+
+ trace_seq_printf(p, "rrela=%u, iekey=%u, rtype=%u",
+ rrela, iekey, rtype);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_resv_report(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u32 numd = get_unaligned_le32(cdw10);
+ u8 eds = cdw10[4] & 0x1;
+
+ trace_seq_printf(p, "numd=%u, eds=%u", numd, eds);
trace_seq_putc(p, 0);
return ret;
@@ -243,6 +332,14 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
return nvme_trace_zone_mgmt_send(p, cdw10);
case nvme_cmd_zone_mgmt_recv:
return nvme_trace_zone_mgmt_recv(p, cdw10);
+ case nvme_cmd_resv_register:
+ return nvme_trace_resv_reg(p, cdw10);
+ case nvme_cmd_resv_acquire:
+ return nvme_trace_resv_acq(p, cdw10);
+ case nvme_cmd_resv_release:
+ return nvme_trace_resv_rel(p, cdw10);
+ case nvme_cmd_resv_report:
+ return nvme_trace_resv_report(p, cdw10);
default:
return nvme_trace_common(p, cdw10);
}
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index ec8557810c21..9a06f9d98cd6 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -7,16 +7,6 @@
#include <linux/vmalloc.h>
#include "nvme.h"
-int nvme_revalidate_zones(struct nvme_ns *ns)
-{
- struct request_queue *q = ns->queue;
-
- blk_queue_chunk_sectors(q, ns->zsze);
- blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
-
- return blk_revalidate_disk_zones(ns->disk, NULL);
-}
-
static int nvme_set_max_append(struct nvme_ctrl *ctrl)
{
struct nvme_command c = { };
@@ -45,10 +35,10 @@ static int nvme_set_max_append(struct nvme_ctrl *ctrl)
return 0;
}
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
+int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
+ struct nvme_zone_info *zi)
{
struct nvme_effects_log *log = ns->head->effects;
- struct request_queue *q = ns->queue;
struct nvme_command c = { };
struct nvme_id_ns_zns *id;
int status;
@@ -99,24 +89,33 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
goto free_data;
}
- ns->zsze = nvme_lba_to_sect(ns, le64_to_cpu(id->lbafe[lbaf].zsze));
- if (!is_power_of_2(ns->zsze)) {
+ zi->zone_size = le64_to_cpu(id->lbafe[lbaf].zsze);
+ if (!is_power_of_2(zi->zone_size)) {
dev_warn(ns->ctrl->device,
- "invalid zone size:%llu for namespace:%u\n",
- ns->zsze, ns->head->ns_id);
+ "invalid zone size: %llu for namespace: %u\n",
+ zi->zone_size, ns->head->ns_id);
status = -ENODEV;
goto free_data;
}
+ zi->max_open_zones = le32_to_cpu(id->mor) + 1;
+ zi->max_active_zones = le32_to_cpu(id->mar) + 1;
- disk_set_zoned(ns->disk, BLK_ZONED_HM);
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
- disk_set_max_open_zones(ns->disk, le32_to_cpu(id->mor) + 1);
- disk_set_max_active_zones(ns->disk, le32_to_cpu(id->mar) + 1);
free_data:
kfree(id);
return status;
}
+void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
+ struct nvme_zone_info *zi)
+{
+ lim->features |= BLK_FEAT_ZONED;
+ lim->max_open_zones = zi->max_open_zones;
+ lim->max_active_zones = zi->max_active_zones;
+ lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
+ lim->chunk_sectors = ns->head->zsze =
+ nvme_lba_to_sect(ns->head, zi->zone_size);
+}
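
The split separates the admin command (Identify, zoned namespace data structure) from the pure limits update, so a caller can query first and commit the limits later. A sketch of the intended call pattern; the real call site in core.c is not part of this hunk:

	struct nvme_zone_info zi = {};
	struct queue_limits lim = {};	/* assumed prepared by the caller */

	if (!nvme_query_zone_info(ns, lbaf, &zi))	/* issues Identify, may sleep */
		nvme_update_zone_info(ns, &lim, &zi);	/* no I/O, only fills limits */
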
+
static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
unsigned int nr_zones, size_t *buflen)
{
@@ -128,7 +127,7 @@ static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
sizeof(struct nvme_zone_descriptor);
nr_zones = min_t(unsigned int, nr_zones,
- get_capacity(ns->disk) >> ilog2(ns->zsze));
+ get_capacity(ns->disk) >> ilog2(ns->head->zsze));
bufsize = sizeof(struct nvme_zone_report) +
nr_zones * sizeof(struct nvme_zone_descriptor);
@@ -147,7 +146,8 @@ static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
return NULL;
}
-static int nvme_zone_parse_entry(struct nvme_ns *ns,
+static int nvme_zone_parse_entry(struct nvme_ctrl *ctrl,
+ struct nvme_ns_head *head,
struct nvme_zone_descriptor *entry,
unsigned int idx, report_zones_cb cb,
void *data)
@@ -155,20 +155,20 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns,
struct blk_zone zone = { };
if ((entry->zt & 0xf) != NVME_ZONE_TYPE_SEQWRITE_REQ) {
- dev_err(ns->ctrl->device, "invalid zone type %#x\n",
+ dev_err(ctrl->device, "invalid zone type %#x\n",
entry->zt);
return -EINVAL;
}
zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
zone.cond = entry->zs >> 4;
- zone.len = ns->zsze;
- zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap));
- zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba));
+ zone.len = head->zsze;
+ zone.capacity = nvme_lba_to_sect(head, le64_to_cpu(entry->zcap));
+ zone.start = nvme_lba_to_sect(head, le64_to_cpu(entry->zslba));
if (zone.cond == BLK_ZONE_COND_FULL)
zone.wp = zone.start + zone.len;
else
- zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
+ zone.wp = nvme_lba_to_sect(head, le64_to_cpu(entry->wp));
return cb(&zone, idx, data);
}
@@ -196,11 +196,11 @@ int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL;
c.zmr.pr = NVME_REPORT_ZONE_PARTIAL;
- sector &= ~(ns->zsze - 1);
+ sector &= ~(ns->head->zsze - 1);
while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) {
memset(report, 0, buflen);
- c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector));
+ c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns->head, sector));
ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen);
if (ret) {
if (ret > 0)
@@ -213,14 +213,15 @@ int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
break;
for (i = 0; i < nz && zone_idx < nr_zones; i++) {
- ret = nvme_zone_parse_entry(ns, &report->entries[i],
+ ret = nvme_zone_parse_entry(ns->ctrl, ns->head,
+ &report->entries[i],
zone_idx, cb, data);
if (ret)
goto out_free;
zone_idx++;
}
- sector += ns->zsze * nz;
+ sector += ns->head->zsze * nz;
}
if (zone_idx > 0)
@@ -239,7 +240,7 @@ blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
c->zms.opcode = nvme_cmd_zone_mgmt_send;
c->zms.nsid = cpu_to_le32(ns->head->ns_id);
- c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
+ c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
c->zms.zsa = action;
if (req_op(req) == REQ_OP_ZONE_RESET_ALL)
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 79fc64035ee3..46be031f91b4 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -4,7 +4,8 @@ config NVME_TARGET
tristate "NVMe Target support"
depends on BLOCK
depends on CONFIGFS_FS
- select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
+ select NVME_KEYRING if NVME_TARGET_TCP_TLS
+ select KEYS if NVME_TARGET_TCP_TLS
select SGL_ALLOC
help
This enables target side support for the NVMe protocol, that is
@@ -16,6 +17,15 @@ config NVME_TARGET
To configure the NVMe target you probably want to use the nvmetcli
tool from http://git.infradead.org/users/hch/nvmetcli.git.
+config NVME_TARGET_DEBUGFS
+ bool "NVMe Target debugfs support"
+ depends on NVME_TARGET
+ help
+	  This enables debugfs support to display the controllers connected
+	  to each subsystem.
+
+ If unsure, say N.
+
config NVME_TARGET_PASSTHRU
bool "NVMe Target Passthrough support"
depends on NVME_TARGET
@@ -84,17 +94,24 @@ config NVME_TARGET_TCP
If unsure, say N.
+config NVME_TARGET_TCP_TLS
+ bool "NVMe over Fabrics TCP target TLS encryption support"
+ depends on NVME_TARGET_TCP
+ select NET_HANDSHAKE
+ help
+ Enables TLS encryption for the NVMe TCP target using the netlink handshake API.
+
+ The TLS handshake daemon is available at
+ https://github.com/oracle/ktls-utils.
+
+ If unsure, say N.
+
config NVME_TARGET_AUTH
- bool "NVMe over Fabrics In-band Authentication support"
+	bool "NVMe over Fabrics In-band Authentication on the target side"
depends on NVME_TARGET
- select NVME_COMMON
- select CRYPTO
- select CRYPTO_HMAC
- select CRYPTO_SHA256
- select CRYPTO_SHA512
- select CRYPTO_DH
- select CRYPTO_DH_RFC7919_GROUPS
+ select NVME_AUTH
help
- This enables support for NVMe over Fabrics In-band Authentication
+	  This enables support for NVMe over Fabrics In-band Authentication on
+	  the target side.
If unsure, say N.
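
Putting the reworked symbols together, a minimal .config fragment for a TLS- and auth-capable target might look as follows (symbol names from the hunks above, values illustrative):

	CONFIG_NVME_TARGET=m
	CONFIG_NVME_TARGET_TCP=m
	CONFIG_NVME_TARGET_TCP_TLS=y	# selects NET_HANDSHAKE; NVME_TARGET then selects NVME_KEYRING, KEYS
	CONFIG_NVME_TARGET_AUTH=y	# selects NVME_AUTH
	CONFIG_NVME_TARGET_DEBUGFS=y
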
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index c66820102493..c402c44350b2 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o
nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
discovery.o io-cmd-file.o io-cmd-bdev.o
+nvmet-$(CONFIG_NVME_TARGET_DEBUGFS) += debugfs.o
nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o
nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o
nvmet-$(CONFIG_NVME_TARGET_AUTH) += fabrics-cmd-auth.o auth.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 39cb570f833d..954d4c074770 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -344,7 +344,7 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
pr_debug("unhandled lid %d on qid %d\n",
req->cmd->get_log_page.lid, req->sq->qid);
req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
- nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+ nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
}
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
@@ -428,7 +428,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->cqes = (0x4 << 4) | 0x4;
/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
- id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));
id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
@@ -496,7 +496,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
req->error_loc = offsetof(struct nvme_identify, nsid);
- status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
goto out;
}
@@ -587,6 +587,16 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req)
u16 status = 0;
int i = 0;
+ /*
+ * NSID values 0xFFFFFFFE and NVME_NSID_ALL are invalid
+ * See NVMe Base Specification, Active Namespace ID list (CNS 02h).
+ */
+ if (min_nsid == 0xFFFFFFFE || min_nsid == NVME_NSID_ALL) {
+ req->error_loc = offsetof(struct nvme_identify, nsid);
+ status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
+ goto out;
+ }
+
list = kzalloc(buf_size, GFP_KERNEL);
if (!list) {
status = NVME_SC_INTERNAL;
@@ -662,7 +672,7 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req)
if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
off) != NVME_IDENTIFY_DATA_SIZE - off)
- status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
out:
nvmet_req_complete(req, status);
@@ -724,7 +734,7 @@ static void nvmet_execute_identify(struct nvmet_req *req)
pr_debug("unhandled identify cns %d on qid %d\n",
req->cmd->identify.cns, req->sq->qid);
req->error_loc = offsetof(struct nvme_identify, cns);
- nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+ nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
}
/*
@@ -807,7 +817,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
if (val32 & ~mask) {
req->error_loc = offsetof(struct nvme_common_command, cdw11);
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
@@ -833,7 +843,7 @@ void nvmet_execute_set_features(struct nvmet_req *req)
ncqr = (cdw11 >> 16) & 0xffff;
nsqr = cdw11 & 0xffff;
if (ncqr == 0xffff || nsqr == 0xffff) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
break;
}
nvmet_set_result(req,
@@ -846,14 +856,14 @@ void nvmet_execute_set_features(struct nvmet_req *req)
status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
break;
case NVME_FEAT_HOST_ID:
- status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ status = NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
break;
case NVME_FEAT_WRITE_PROTECT:
status = nvmet_set_feat_write_protect(req);
break;
default:
req->error_loc = offsetof(struct nvme_common_command, cdw10);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
break;
}
@@ -939,7 +949,7 @@ void nvmet_execute_get_features(struct nvmet_req *req)
if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
req->error_loc =
offsetof(struct nvme_common_command, cdw11);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
break;
}
@@ -952,7 +962,7 @@ void nvmet_execute_get_features(struct nvmet_req *req)
default:
req->error_loc =
offsetof(struct nvme_common_command, cdw10);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
break;
}
@@ -969,7 +979,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
mutex_lock(&ctrl->lock);
if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
mutex_unlock(&ctrl->lock);
- nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
+ nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR);
return;
}
ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
@@ -1005,8 +1015,6 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (nvme_is_fabrics(cmd))
return nvmet_parse_fabrics_admin_cmd(req);
- if (unlikely(!nvmet_check_auth_status(req)))
- return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
return nvmet_parse_discovery_cmd(req);
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index 4dcddcf95279..7897d02c681d 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -25,6 +25,18 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
unsigned char key_hash;
char *dhchap_secret;
+ if (!strlen(secret)) {
+ if (set_ctrl) {
+ kfree(host->dhchap_ctrl_secret);
+ host->dhchap_ctrl_secret = NULL;
+ host->dhchap_ctrl_key_hash = 0;
+ } else {
+ kfree(host->dhchap_secret);
+ host->dhchap_secret = NULL;
+ host->dhchap_key_hash = 0;
+ }
+ return 0;
+ }
if (sscanf(secret, "DHHC-1:%hhd:%*s", &key_hash) != 1)
return -EINVAL;
if (key_hash > 3) {
@@ -44,6 +56,7 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
dhchap_secret = kstrdup(secret, GFP_KERNEL);
if (!dhchap_secret)
return -ENOMEM;
+ down_write(&nvmet_config_sem);
if (set_ctrl) {
kfree(host->dhchap_ctrl_secret);
host->dhchap_ctrl_secret = strim(dhchap_secret);
@@ -53,6 +66,7 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
host->dhchap_secret = strim(dhchap_secret);
host->dhchap_key_hash = key_hash;
}
+ up_write(&nvmet_config_sem);
return 0;
}
@@ -124,12 +138,11 @@ int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id)
return ret;
}
-int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
{
int ret = 0;
struct nvmet_host_link *p;
struct nvmet_host *host = NULL;
- const char *hash_name;
down_read(&nvmet_config_sem);
if (nvmet_is_disc_subsys(ctrl->subsys))
@@ -147,13 +160,16 @@ int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
}
if (!host) {
pr_debug("host %s not found\n", ctrl->hostnqn);
- ret = -EPERM;
+ ret = NVME_AUTH_DHCHAP_FAILURE_FAILED;
goto out_unlock;
}
ret = nvmet_setup_dhgroup(ctrl, host->dhchap_dhgroup_id);
- if (ret < 0)
+ if (ret < 0) {
pr_warn("Failed to setup DH group");
+ ret = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ goto out_unlock;
+ }
if (!host->dhchap_secret) {
pr_debug("No authentication provided\n");
@@ -164,12 +180,6 @@ int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
pr_debug("Re-use existing hash ID %d\n",
ctrl->shash_id);
} else {
- hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
- if (!hash_name) {
- pr_warn("Hash ID %d invalid\n", host->dhchap_hash_id);
- ret = -EINVAL;
- goto out_unlock;
- }
ctrl->shash_id = host->dhchap_hash_id;
}
@@ -178,7 +188,7 @@ int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
ctrl->host_key = nvme_auth_extract_key(host->dhchap_secret + 10,
host->dhchap_key_hash);
if (IS_ERR(ctrl->host_key)) {
- ret = PTR_ERR(ctrl->host_key);
+ ret = NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE;
ctrl->host_key = NULL;
goto out_free_hash;
}
@@ -196,7 +206,7 @@ int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
ctrl->ctrl_key = nvme_auth_extract_key(host->dhchap_ctrl_secret + 10,
host->dhchap_ctrl_key_hash);
if (IS_ERR(ctrl->ctrl_key)) {
- ret = PTR_ERR(ctrl->ctrl_key);
+ ret = NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE;
ctrl->ctrl_key = NULL;
goto out_free_hash;
}
@@ -267,7 +277,8 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
struct shash_desc *shash;
struct nvmet_ctrl *ctrl = req->sq->ctrl;
const char *hash_name;
- u8 *challenge = req->sq->dhchap_c1, *host_response;
+ u8 *challenge = req->sq->dhchap_c1;
+ struct nvme_dhchap_key *transformed_key;
u8 buf[4];
int ret;
@@ -284,21 +295,22 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
}
if (shash_len != crypto_shash_digestsize(shash_tfm)) {
- pr_debug("%s: hash len mismatch (len %d digest %d)\n",
- __func__, shash_len,
- crypto_shash_digestsize(shash_tfm));
+ pr_err("%s: hash len mismatch (len %d digest %d)\n",
+ __func__, shash_len,
+ crypto_shash_digestsize(shash_tfm));
ret = -EINVAL;
goto out_free_tfm;
}
- host_response = nvme_auth_transform_key(ctrl->host_key, ctrl->hostnqn);
- if (IS_ERR(host_response)) {
- ret = PTR_ERR(host_response);
+ transformed_key = nvme_auth_transform_key(ctrl->host_key,
+ ctrl->hostnqn);
+ if (IS_ERR(transformed_key)) {
+ ret = PTR_ERR(transformed_key);
goto out_free_tfm;
}
- ret = crypto_shash_setkey(shash_tfm, host_response,
- ctrl->host_key->len);
+ ret = crypto_shash_setkey(shash_tfm, transformed_key->key,
+ transformed_key->len);
if (ret)
goto out_free_response;
@@ -314,7 +326,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
req->sq->dhchap_c1,
challenge, shash_len);
if (ret)
- goto out_free_response;
+ goto out_free_challenge;
}
pr_debug("ctrl %d qid %d host response seq %u transaction %d\n",
@@ -325,7 +337,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
GFP_KERNEL);
if (!shash) {
ret = -ENOMEM;
- goto out_free_response;
+ goto out_free_challenge;
}
shash->tfm = shash_tfm;
ret = crypto_shash_init(shash);
@@ -361,14 +373,15 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
goto out;
ret = crypto_shash_final(shash, response);
out:
+ kfree(shash);
+out_free_challenge:
if (challenge != req->sq->dhchap_c1)
kfree(challenge);
- kfree(shash);
out_free_response:
- kfree_sensitive(host_response);
+ nvme_auth_free_key(transformed_key);
out_free_tfm:
crypto_free_shash(shash_tfm);
- return 0;
+ return ret;
}
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
@@ -378,7 +391,8 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
struct shash_desc *shash;
struct nvmet_ctrl *ctrl = req->sq->ctrl;
const char *hash_name;
- u8 *challenge = req->sq->dhchap_c2, *ctrl_response;
+ u8 *challenge = req->sq->dhchap_c2;
+ struct nvme_dhchap_key *transformed_key;
u8 buf[4];
int ret;
@@ -402,15 +416,15 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
goto out_free_tfm;
}
- ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key,
+ transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
ctrl->subsysnqn);
- if (IS_ERR(ctrl_response)) {
- ret = PTR_ERR(ctrl_response);
+ if (IS_ERR(transformed_key)) {
+ ret = PTR_ERR(transformed_key);
goto out_free_tfm;
}
- ret = crypto_shash_setkey(shash_tfm, ctrl_response,
- ctrl->ctrl_key->len);
+ ret = crypto_shash_setkey(shash_tfm, transformed_key->key,
+ transformed_key->len);
if (ret)
goto out_free_response;
@@ -426,14 +440,14 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
req->sq->dhchap_c2,
challenge, shash_len);
if (ret)
- goto out_free_response;
+ goto out_free_challenge;
}
shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
GFP_KERNEL);
if (!shash) {
ret = -ENOMEM;
- goto out_free_response;
+ goto out_free_challenge;
}
shash->tfm = shash_tfm;
@@ -470,14 +484,15 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
goto out;
ret = crypto_shash_final(shash, response);
out:
+ kfree(shash);
+out_free_challenge:
if (challenge != req->sq->dhchap_c2)
kfree(challenge);
- kfree(shash);
out_free_response:
- kfree_sensitive(ctrl_response);
+ nvme_auth_free_key(transformed_key);
out_free_tfm:
crypto_free_shash(shash_tfm);
- return 0;
+ return ret;
}
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 907143870da5..685e89b35d33 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -15,8 +15,10 @@
#ifdef CONFIG_NVME_TARGET_AUTH
#include <linux/nvme-auth.h>
#endif
+#include <linux/nvme-keyring.h>
#include <crypto/hash.h>
#include <crypto/kpp.h>
+#include <linux/nospec.h>
#include "nvmet.h"
@@ -159,10 +161,14 @@ static const struct nvmet_type_name_map nvmet_addr_treq[] = {
{ NVMF_TREQ_NOT_REQUIRED, "not required" },
};
+static inline u8 nvmet_port_disc_addr_treq_mask(struct nvmet_port *port)
+{
+ return (port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK);
+}
+
static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
{
- u8 treq = to_nvmet_port(item)->disc_addr.treq &
- NVME_TREQ_SECURE_CHANNEL_MASK;
+ u8 treq = nvmet_port_disc_addr_treq_secure_channel(to_nvmet_port(item));
int i;
for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
@@ -178,7 +184,7 @@ static ssize_t nvmet_addr_treq_store(struct config_item *item,
const char *page, size_t count)
{
struct nvmet_port *port = to_nvmet_port(item);
- u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;
+ u8 treq = nvmet_port_disc_addr_treq_mask(port);
int i;
if (nvmet_is_port_enabled(port, __func__))
@@ -193,6 +199,20 @@ static ssize_t nvmet_addr_treq_store(struct config_item *item,
return -EINVAL;
found:
+ if (port->disc_addr.trtype == NVMF_TRTYPE_TCP &&
+ port->disc_addr.tsas.tcp.sectype == NVMF_TCP_SECTYPE_TLS13) {
+ switch (nvmet_addr_treq[i].type) {
+ case NVMF_TREQ_NOT_SPECIFIED:
+ pr_debug("treq '%s' not allowed for TLS1.3\n",
+ nvmet_addr_treq[i].name);
+ return -EINVAL;
+ case NVMF_TREQ_NOT_REQUIRED:
+			pr_warn("Allowing non-TLS connections while TLS1.3 is enabled\n");
+ break;
+ default:
+ break;
+ }
+ }
treq |= nvmet_addr_treq[i].type;
port->disc_addr.treq = treq;
return count;
@@ -253,6 +273,32 @@ static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_, param_inline_data_size);
+static ssize_t nvmet_param_max_queue_size_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", port->max_queue_size);
+}
+
+static ssize_t nvmet_param_max_queue_size_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ int ret;
+
+ if (nvmet_is_port_enabled(port, __func__))
+ return -EACCES;
+ ret = kstrtoint(page, 0, &port->max_queue_size);
+ if (ret) {
+ pr_err("Invalid value '%s' for max_queue_size\n", page);
+ return -EINVAL;
+ }
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_max_queue_size);
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
char *page)
@@ -303,6 +349,11 @@ static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
}
+static void nvmet_port_init_tsas_tcp(struct nvmet_port *port, int sectype)
+{
+ port->disc_addr.tsas.tcp.sectype = sectype;
+}
+
static ssize_t nvmet_addr_trtype_store(struct config_item *item,
const char *page, size_t count)
{
@@ -325,11 +376,120 @@ found:
port->disc_addr.trtype = nvmet_transport[i].type;
if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
nvmet_port_init_tsas_rdma(port);
+ else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP)
+ nvmet_port_init_tsas_tcp(port, NVMF_TCP_SECTYPE_NONE);
return count;
}
CONFIGFS_ATTR(nvmet_, addr_trtype);
+static const struct nvmet_type_name_map nvmet_addr_tsas_tcp[] = {
+ { NVMF_TCP_SECTYPE_NONE, "none" },
+ { NVMF_TCP_SECTYPE_TLS13, "tls1.3" },
+};
+
+static const struct nvmet_type_name_map nvmet_addr_tsas_rdma[] = {
+ { NVMF_RDMA_QPTYPE_CONNECTED, "connected" },
+ { NVMF_RDMA_QPTYPE_DATAGRAM, "datagram" },
+};
+
+static ssize_t nvmet_addr_tsas_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ int i;
+
+ if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) {
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
+ if (port->disc_addr.tsas.tcp.sectype == nvmet_addr_tsas_tcp[i].type)
+ return sprintf(page, "%s\n", nvmet_addr_tsas_tcp[i].name);
+ }
+ } else if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) {
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) {
+ if (port->disc_addr.tsas.rdma.qptype == nvmet_addr_tsas_rdma[i].type)
+ return sprintf(page, "%s\n", nvmet_addr_tsas_rdma[i].name);
+ }
+ }
+ return sprintf(page, "\n");
+}
+
+static u8 nvmet_addr_tsas_rdma_store(const char *page)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) {
+ if (sysfs_streq(page, nvmet_addr_tsas_rdma[i].name))
+ return nvmet_addr_tsas_rdma[i].type;
+ }
+ return NVMF_RDMA_QPTYPE_INVALID;
+}
+
+static u8 nvmet_addr_tsas_tcp_store(const char *page)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
+ if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name))
+ return nvmet_addr_tsas_tcp[i].type;
+ }
+ return NVMF_TCP_SECTYPE_INVALID;
+}
+
+static ssize_t nvmet_addr_tsas_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ u8 treq = nvmet_port_disc_addr_treq_mask(port);
+ u8 sectype, qptype;
+
+ if (nvmet_is_port_enabled(port, __func__))
+ return -EACCES;
+
+ if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) {
+ qptype = nvmet_addr_tsas_rdma_store(page);
+ if (qptype == port->disc_addr.tsas.rdma.qptype)
+ return count;
+ } else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) {
+ sectype = nvmet_addr_tsas_tcp_store(page);
+ if (sectype != NVMF_TCP_SECTYPE_INVALID)
+ goto found;
+ }
+
+ pr_err("Invalid value '%s' for tsas\n", page);
+ return -EINVAL;
+
+found:
+ if (sectype == NVMF_TCP_SECTYPE_TLS13) {
+ if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS)) {
+ pr_err("TLS is not supported\n");
+ return -EINVAL;
+ }
+ if (!port->keyring) {
+ pr_err("TLS keyring not configured\n");
+ return -EINVAL;
+ }
+ }
+
+ nvmet_port_init_tsas_tcp(port, sectype);
+ /*
+	 * If TLS is enabled, TREQ should be set to 'required' by default.
+ */
+ if (sectype == NVMF_TCP_SECTYPE_TLS13) {
+ u8 sc = nvmet_port_disc_addr_treq_secure_channel(port);
+
+ if (sc == NVMF_TREQ_NOT_SPECIFIED)
+ treq |= NVMF_TREQ_REQUIRED;
+ else
+ treq |= sc;
+ } else {
+ treq |= NVMF_TREQ_NOT_SPECIFIED;
+ }
+ port->disc_addr.treq = treq;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_tsas);
+
/*
* Namespace structures & file operation functions below
*/
@@ -509,6 +669,7 @@ static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
down_write(&nvmet_ana_sem);
oldgrpid = ns->anagrpid;
+ newgrpid = array_index_nospec(newgrpid, NVMET_MAX_ANAGRPS);
nvmet_ana_group_enabled[newgrpid]++;
ns->anagrpid = newgrpid;
nvmet_ana_group_enabled[oldgrpid]--;
@@ -536,10 +697,18 @@ static ssize_t nvmet_ns_enable_store(struct config_item *item,
if (kstrtobool(page, &enable))
return -EINVAL;
+ /*
+	 * Take the global nvmet_config_sem because the disable routine has a
+	 * window where it releases the subsys lock, giving a parallel enable
+	 * a chance to run concurrently and leaving the disable with a
+	 * miscounted ns percpu_ref.
+ */
+ down_write(&nvmet_config_sem);
if (enable)
ret = nvmet_ns_enable(ns);
else
nvmet_ns_disable(ns);
+ up_write(&nvmet_config_sem);
return ret ? ret : count;
}
@@ -614,6 +783,18 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
NULL,
};
+bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
+{
+ struct config_item *ns_item;
+ char name[12];
+
+ snprintf(name, sizeof(name), "%u", nsid);
+ mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
+ ns_item = config_group_find_item(&subsys->namespaces_group, name);
+ mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
+ return ns_item != NULL;
+}
+
static void nvmet_ns_release(struct config_item *item)
{
struct nvmet_ns *ns = to_nvmet_ns(item);
@@ -1162,7 +1343,7 @@ static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
return -EINVAL;
down_write(&nvmet_config_sem);
- if (cntlid_min >= to_subsys(item)->cntlid_max)
+ if (cntlid_min > to_subsys(item)->cntlid_max)
goto out_unlock;
to_subsys(item)->cntlid_min = cntlid_min;
up_write(&nvmet_config_sem);
@@ -1192,7 +1373,7 @@ static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
return -EINVAL;
down_write(&nvmet_config_sem);
- if (cntlid_max <= to_subsys(item)->cntlid_min)
+ if (cntlid_max < to_subsys(item)->cntlid_min)
goto out_unlock;
to_subsys(item)->cntlid_max = cntlid_max;
up_write(&nvmet_config_sem);
@@ -1473,6 +1654,11 @@ static struct config_group *nvmet_subsys_make(struct config_group *group,
return ERR_PTR(-EINVAL);
}
+ if (sysfs_streq(name, nvmet_disc_subsys->subsysnqn)) {
+ pr_err("can't create subsystem using unique discovery NQN\n");
+ return ERR_PTR(-EINVAL);
+ }
+
subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
if (IS_ERR(subsys))
return ERR_CAST(subsys);
@@ -1700,6 +1886,7 @@ static struct config_group *nvmet_ana_groups_make_group(
grp->grpid = grpid;
down_write(&nvmet_ana_sem);
+ grpid = array_index_nospec(grpid, NVMET_MAX_ANAGRPS);
nvmet_ana_group_enabled[grpid]++;
up_write(&nvmet_ana_sem);
@@ -1731,6 +1918,7 @@ static void nvmet_port_release(struct config_item *item)
flush_workqueue(nvmet_wq);
list_del(&port->global_entry);
+ key_put(port->keyring);
kfree(port->ana_state);
kfree(port);
}
@@ -1741,7 +1929,9 @@ static struct configfs_attribute *nvmet_port_attrs[] = {
&nvmet_attr_addr_traddr,
&nvmet_attr_addr_trsvcid,
&nvmet_attr_addr_trtype,
+ &nvmet_attr_addr_tsas,
&nvmet_attr_param_inline_data_size,
+ &nvmet_attr_param_max_queue_size,
#ifdef CONFIG_BLK_DEV_INTEGRITY
&nvmet_attr_param_pi_enable,
#endif
@@ -1779,6 +1969,14 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
return ERR_PTR(-ENOMEM);
}
+ if (IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS) && nvme_keyring_id()) {
+ port->keyring = key_lookup(nvme_keyring_id());
+ if (IS_ERR(port->keyring)) {
+ pr_warn("NVMe keyring not available, disabling TLS\n");
+ port->keyring = NULL;
+ }
+ }
+
for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
if (i == NVMET_DEFAULT_ANA_GRPID)
port->ana_state[1] = NVME_ANA_OPTIMIZED;
@@ -1792,6 +1990,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
INIT_LIST_HEAD(&port->subsystems);
INIT_LIST_HEAD(&port->referrals);
port->inline_data_size = -1; /* < 0 == let the transport choose */
+ port->max_queue_size = -1; /* < 0 == let the transport choose */
port->disc_addr.portid = cpu_to_le16(portid);
port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
@@ -1837,11 +2036,17 @@ static struct config_group nvmet_ports_group;
static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
char *page)
{
- u8 *dhchap_secret = to_host(item)->dhchap_secret;
+ u8 *dhchap_secret;
+ ssize_t ret;
+ down_read(&nvmet_config_sem);
+ dhchap_secret = to_host(item)->dhchap_secret;
if (!dhchap_secret)
- return sprintf(page, "\n");
- return sprintf(page, "%s\n", dhchap_secret);
+ ret = sprintf(page, "\n");
+ else
+ ret = sprintf(page, "%s\n", dhchap_secret);
+ up_read(&nvmet_config_sem);
+ return ret;
}
static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
@@ -1865,10 +2070,16 @@ static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
char *page)
{
u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;
+ ssize_t ret;
+ down_read(&nvmet_config_sem);
+ dhchap_secret = to_host(item)->dhchap_ctrl_secret;
if (!dhchap_secret)
- return sprintf(page, "\n");
- return sprintf(page, "%s\n", dhchap_secret);
+ ret = sprintf(page, "\n");
+ else
+ ret = sprintf(page, "%s\n", dhchap_secret);
+ up_read(&nvmet_config_sem);
+ return ret;
}
static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
@@ -2006,7 +2217,49 @@ static const struct config_item_type nvmet_hosts_type = {
static struct config_group nvmet_hosts_group;
+static ssize_t nvmet_root_discovery_nqn_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n", nvmet_disc_subsys->subsysnqn);
+}
+
+static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct list_head *entry;
+ size_t len;
+
+ len = strcspn(page, "\n");
+ if (!len || len > NVMF_NQN_FIELD_LEN - 1)
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ list_for_each(entry, &nvmet_subsystems_group.cg_children) {
+ struct config_item *item =
+ container_of(entry, struct config_item, ci_entry);
+
+ if (!strncmp(config_item_name(item), page, len)) {
+ pr_err("duplicate NQN %s\n", config_item_name(item));
+ up_write(&nvmet_config_sem);
+ return -EINVAL;
+ }
+ }
+ memset(nvmet_disc_subsys->subsysnqn, 0, NVMF_NQN_FIELD_LEN);
+ memcpy(nvmet_disc_subsys->subsysnqn, page, len);
+ up_write(&nvmet_config_sem);
+
+ return len;
+}
+
+CONFIGFS_ATTR(nvmet_root_, discovery_nqn);
+
+static struct configfs_attribute *nvmet_root_attrs[] = {
+ &nvmet_root_attr_discovery_nqn,
+ NULL,
+};
+
static const struct config_item_type nvmet_root_type = {
+ .ct_attrs = nvmet_root_attrs,
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 3935165048e7..ed2424f8a396 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -16,6 +16,7 @@
#include "trace.h"
#include "nvmet.h"
+#include "debugfs.h"
struct kmem_cache *nvmet_bvec_cache;
struct workqueue_struct *buffered_io_wq;
@@ -55,18 +56,18 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
return NVME_SC_SUCCESS;
case -ENOSPC:
req->error_loc = offsetof(struct nvme_rw_command, length);
- return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
+ return NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR;
case -EREMOTEIO:
req->error_loc = offsetof(struct nvme_rw_command, slba);
- return NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ return NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
case -EOPNOTSUPP:
req->error_loc = offsetof(struct nvme_common_command, opcode);
switch (req->cmd->common.opcode) {
case nvme_cmd_dsm:
case nvme_cmd_write_zeroes:
- return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
+ return NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR;
default:
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
break;
case -ENODATA:
@@ -76,7 +77,7 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
fallthrough;
default:
req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INTERNAL | NVME_SC_DNR;
+ return NVME_SC_INTERNAL | NVME_STATUS_DNR;
}
}
@@ -86,7 +87,7 @@ u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
req->sq->qid);
req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
@@ -97,7 +98,7 @@ u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
{
if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
req->error_loc = offsetof(struct nvme_common_command, dptr);
- return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
}
return 0;
}
@@ -106,7 +107,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
req->error_loc = offsetof(struct nvme_common_command, dptr);
- return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
}
return 0;
}
@@ -115,7 +116,7 @@ u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
req->error_loc = offsetof(struct nvme_common_command, dptr);
- return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
}
return 0;
}
@@ -145,7 +146,7 @@ static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
while (ctrl->nr_async_event_cmds) {
req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
mutex_unlock(&ctrl->lock);
- nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+ nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_STATUS_DNR);
mutex_lock(&ctrl->lock);
}
mutex_unlock(&ctrl->lock);
@@ -248,7 +249,7 @@ void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
continue;
- nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+ nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
NVME_AER_NOTICE_NS_CHANGED,
NVME_LOG_CHANGED_NS);
}
@@ -265,7 +266,7 @@ void nvmet_send_ana_event(struct nvmet_subsys *subsys,
continue;
if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
continue;
- nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+ nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
}
mutex_unlock(&subsys->lock);
@@ -358,6 +359,18 @@ int nvmet_enable_port(struct nvmet_port *port)
if (port->inline_data_size < 0)
port->inline_data_size = 0;
+ /*
+ * If the transport didn't set the max_queue_size properly, then clamp
+ * it to the target limits. Also set default values in case the
+ * transport didn't set it at all.
+ */
+ if (port->max_queue_size < 0)
+ port->max_queue_size = NVMET_MAX_QUEUE_SIZE;
+ else
+ port->max_queue_size = clamp_t(int, port->max_queue_size,
+ NVMET_MIN_QUEUE_SIZE,
+ NVMET_MAX_QUEUE_SIZE);
+
port->enabled = true;
port->tr_ops = ops;
return 0;
@@ -425,11 +438,14 @@ void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
u16 nvmet_req_find_ns(struct nvmet_req *req)
{
u32 nsid = le32_to_cpu(req->cmd->common.nsid);
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
- req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
+ req->ns = xa_load(&subsys->namespaces, nsid);
if (unlikely(!req->ns)) {
req->error_loc = offsetof(struct nvme_common_command, nsid);
- return NVME_SC_INVALID_NS | NVME_SC_DNR;
+ if (nvmet_subsys_nsid_exists(subsys, nsid))
+ return NVME_SC_INTERNAL_PATH_ERROR;
+ return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
}
percpu_ref_get(&req->ns->ref);
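
The lookup above now distinguishes three cases. Restated as a sketch (this helper only restates the hunk's logic; it does not exist in the patch):

	static u16 nsid_lookup_status(struct nvmet_subsys *subsys, u32 nsid,
				      bool loaded)
	{
		if (loaded)			/* nsid present in the xarray */
			return NVME_SC_SUCCESS;
		if (nvmet_subsys_nsid_exists(subsys, nsid))
			/* configured but not enabled: retryable, lets the
			 * host's multipath code try another path */
			return NVME_SC_INTERNAL_PATH_ERROR;
		return NVME_SC_INVALID_NS | NVME_STATUS_DNR;	/* do not retry */
	}
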
@@ -803,6 +819,15 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
percpu_ref_exit(&sq->ref);
nvmet_auth_sq_free(sq);
+ /*
+	 * We must reference the ctrl again after waiting for inflight IO
+	 * to complete, because an admin connect may have sneaked in after
+	 * we stored sq->ctrl locally but before we killed the percpu_ref.
+	 * The admin connect allocates and assigns sq->ctrl, which now needs
+	 * a final ref put, as this ctrl is going away.
+ */
+ ctrl = sq->ctrl;
+
if (ctrl) {
/*
* The teardown flow may take some time, and the host may not
@@ -880,7 +905,7 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
return nvmet_parse_fabrics_io_cmd(req);
if (unlikely(!nvmet_check_auth_status(req)))
- return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+ return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
ret = nvmet_check_ctrl_status(req);
if (unlikely(ret))
@@ -933,6 +958,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->metadata_sg_cnt = 0;
req->transfer_len = 0;
req->metadata_len = 0;
+ req->cqe->result.u64 = 0;
req->cqe->status = 0;
req->cqe->sq_head = 0;
req->ns = NULL;
@@ -942,7 +968,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
/* no support for fused commands yet */
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
req->error_loc = offsetof(struct nvme_common_command, flags);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto fail;
}
@@ -953,7 +979,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
*/
if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
req->error_loc = offsetof(struct nvme_common_command, flags);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto fail;
}
@@ -971,7 +997,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
trace_nvmet_req_init(req, req->cmd);
if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto fail;
}
@@ -998,7 +1024,7 @@ bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
if (unlikely(len != req->transfer_len)) {
req->error_loc = offsetof(struct nvme_common_command, dptr);
- nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+ nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
return false;
}
@@ -1010,7 +1036,7 @@ bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{
if (unlikely(data_len > req->transfer_len)) {
req->error_loc = offsetof(struct nvme_common_command, dptr);
- nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+ nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
return false;
}
@@ -1223,9 +1249,10 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
ctrl->cap |= (15ULL << 24);
/* maximum queue entries supported: */
if (ctrl->ops->get_max_queue_size)
- ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
+ ctrl->cap |= min_t(u16, ctrl->ops->get_max_queue_size(ctrl),
+ ctrl->port->max_queue_size) - 1;
else
- ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+ ctrl->cap |= ctrl->port->max_queue_size - 1;
if (nvmet_is_passthru_subsys(ctrl->subsys))
nvmet_passthrough_override_cap(ctrl);
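
CAP.MQES is a 0's-based field in bits 15:0 of the CAP register, which is why one is subtracted from the effective queue size above. A sketch of the computation, considering only the MQES field for simplicity:

#include <stdint.h>
#include <stdio.h>

/* sketch: lower of the transport and port limits, encoded 0's-based */
static uint64_t cap_with_mqes(uint64_t cap, uint16_t transport_max,
			      uint16_t port_max)
{
	uint16_t qsize = transport_max < port_max ? transport_max : port_max;

	return cap | (uint64_t)(qsize - 1);
}

int main(void)
{
	/* prints "MQES=127": the host may build queues of up to 128 entries */
	printf("MQES=%llu\n",
	       (unsigned long long)(cap_with_mqes(0, 128, 1024) & 0xffff));
	return 0;
}
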
@@ -1278,18 +1305,18 @@ u16 nvmet_check_ctrl_status(struct nvmet_req *req)
if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
req->cmd->common.opcode, req->sq->qid);
- return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
}
if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
req->cmd->common.opcode, req->sq->qid);
- return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
}
if (unlikely(!nvmet_check_auth_status(req))) {
pr_warn("qid %d not authenticated\n", req->sq->qid);
- return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+ return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
}
return 0;
}
@@ -1363,7 +1390,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
int ret;
u16 status;
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
subsys = nvmet_find_get_subsys(req->port, subsysnqn);
if (!subsys) {
pr_warn("connect request for invalid subsystem %s!\n",
@@ -1379,7 +1406,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
hostnqn, subsysnqn);
req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
up_read(&nvmet_config_sem);
- status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
+ status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
req->error_loc = offsetof(struct nvme_common_command, dptr);
goto out_put_subsystem;
}
@@ -1411,6 +1438,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
kref_init(&ctrl->ref);
ctrl->subsys = subsys;
+ ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
nvmet_init_cap(ctrl);
WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
@@ -1425,14 +1453,11 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (!ctrl->sqs)
goto out_free_changed_ns_list;
- if (subsys->cntlid_min > subsys->cntlid_max)
- goto out_free_sqs;
-
ret = ida_alloc_range(&cntlid_ida,
subsys->cntlid_min, subsys->cntlid_max,
GFP_KERNEL);
if (ret < 0) {
- status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
+ status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
goto out_free_sqs;
}
ctrl->cntlid = ret;
@@ -1455,6 +1480,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
mutex_lock(&subsys->lock);
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
nvmet_setup_p2p_ns_map(ctrl, req);
+ nvmet_debugfs_ctrl_setup(ctrl);
mutex_unlock(&subsys->lock);
*ctrlp = ctrl;
@@ -1489,6 +1515,8 @@ static void nvmet_ctrl_free(struct kref *ref)
nvmet_destroy_auth(ctrl);
+ nvmet_debugfs_ctrl_free(ctrl);
+
ida_free(&cntlid_ida, ctrl->cntlid);
nvmet_async_events_free(ctrl);
@@ -1515,6 +1543,14 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
+ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl,
+ char *traddr, size_t traddr_len)
+{
+ if (!ctrl->ops->host_traddr)
+ return -EOPNOTSUPP;
+ return ctrl->ops->host_traddr(ctrl, traddr, traddr_len);
+}
+
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
const char *subsysnqn)
{
@@ -1530,6 +1566,13 @@ static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
}
down_read(&nvmet_config_sem);
+ if (!strncmp(nvmet_disc_subsys->subsysnqn, subsysnqn,
+ NVMF_NQN_SIZE)) {
+ if (kref_get_unless_zero(&nvmet_disc_subsys->ref)) {
+ up_read(&nvmet_config_sem);
+ return nvmet_disc_subsys;
+ }
+ }
list_for_each_entry(p, &port->subsystems, entry) {
if (!strncmp(p->subsys->subsysnqn, subsysnqn,
NVMF_NQN_SIZE)) {
@@ -1602,8 +1645,14 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
INIT_LIST_HEAD(&subsys->ctrls);
INIT_LIST_HEAD(&subsys->hosts);
+ ret = nvmet_debugfs_subsys_setup(subsys);
+ if (ret)
+ goto free_subsysnqn;
+
return subsys;
+free_subsysnqn:
+ kfree(subsys->subsysnqn);
free_fr:
kfree(subsys->firmware_rev);
free_mn:
@@ -1620,6 +1669,8 @@ static void nvmet_subsys_free(struct kref *ref)
WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
+ nvmet_debugfs_subsys_free(subsys);
+
xa_destroy(&subsys->namespaces);
nvmet_passthru_subsys_free(subsys);
@@ -1665,7 +1716,8 @@ static int __init nvmet_init(void)
if (!buffered_io_wq)
goto out_free_zbd_work_queue;
- nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+ nvmet_wq = alloc_workqueue("nvmet-wq",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
if (!nvmet_wq)
goto out_free_buffered_work_queue;
@@ -1673,11 +1725,18 @@ static int __init nvmet_init(void)
if (error)
goto out_free_nvmet_work_queue;
- error = nvmet_init_configfs();
+ error = nvmet_init_debugfs();
if (error)
goto out_exit_discovery;
+
+ error = nvmet_init_configfs();
+ if (error)
+ goto out_exit_debugfs;
+
return 0;
+out_exit_debugfs:
+ nvmet_exit_debugfs();
out_exit_discovery:
nvmet_exit_discovery();
out_free_nvmet_work_queue:
@@ -1694,6 +1753,7 @@ out_destroy_bvec_cache:
static void __exit nvmet_exit(void)
{
nvmet_exit_configfs();
+ nvmet_exit_debugfs();
nvmet_exit_discovery();
ida_destroy(&cntlid_ida);
destroy_workqueue(nvmet_wq);
@@ -1708,4 +1768,5 @@ static void __exit nvmet_exit(void)
module_init(nvmet_init);
module_exit(nvmet_exit);
+MODULE_DESCRIPTION("NVMe target core framework");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/debugfs.c b/drivers/nvme/target/debugfs.c
new file mode 100644
index 000000000000..220c7391fc19
--- /dev/null
+++ b/drivers/nvme/target/debugfs.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DebugFS interface for the NVMe target.
+ * Copyright (c) 2022-2024 Shadow
+ * Copyright (c) 2024 SUSE LLC
+ */
+
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include "nvmet.h"
+#include "debugfs.h"
+
+static struct dentry *nvmet_debugfs;
+
+#define NVMET_DEBUGFS_ATTR(field) \
+ static int field##_open(struct inode *inode, struct file *file) \
+ { return single_open(file, field##_show, inode->i_private); } \
+ \
+ static const struct file_operations field##_fops = { \
+ .open = field##_open, \
+ .read = seq_read, \
+ .release = single_release, \
+ }
+
+#define NVMET_DEBUGFS_RW_ATTR(field) \
+ static int field##_open(struct inode *inode, struct file *file) \
+ { return single_open(file, field##_show, inode->i_private); } \
+ \
+ static const struct file_operations field##_fops = { \
+ .open = field##_open, \
+ .read = seq_read, \
+ .write = field##_write, \
+ .release = single_release, \
+ }
+
+static int nvmet_ctrl_hostnqn_show(struct seq_file *m, void *p)
+{
+ struct nvmet_ctrl *ctrl = m->private;
+
+ seq_puts(m, ctrl->hostnqn);
+ return 0;
+}
+NVMET_DEBUGFS_ATTR(nvmet_ctrl_hostnqn);
+
+static int nvmet_ctrl_kato_show(struct seq_file *m, void *p)
+{
+ struct nvmet_ctrl *ctrl = m->private;
+
+ seq_printf(m, "%d\n", ctrl->kato);
+ return 0;
+}
+NVMET_DEBUGFS_ATTR(nvmet_ctrl_kato);
+
+static int nvmet_ctrl_port_show(struct seq_file *m, void *p)
+{
+ struct nvmet_ctrl *ctrl = m->private;
+
+ seq_printf(m, "%d\n", le16_to_cpu(ctrl->port->disc_addr.portid));
+ return 0;
+}
+NVMET_DEBUGFS_ATTR(nvmet_ctrl_port);
+
+static const char *const csts_state_names[] = {
+ [NVME_CSTS_RDY] = "ready",
+ [NVME_CSTS_CFS] = "fatal",
+ [NVME_CSTS_NSSRO] = "reset",
+ [NVME_CSTS_SHST_OCCUR] = "shutdown",
+ [NVME_CSTS_SHST_CMPLT] = "completed",
+ [NVME_CSTS_PP] = "paused",
+};
+
+static int nvmet_ctrl_state_show(struct seq_file *m, void *p)
+{
+ struct nvmet_ctrl *ctrl = m->private;
+ bool sep = false;
+ int i;
+
+ for (i = 0; i < 7; i++) {
+ int state = BIT(i);
+
+ if (!(ctrl->csts & state))
+ continue;
+ if (sep)
+ seq_puts(m, "|");
+ sep = true;
+ if (csts_state_names[state])
+ seq_puts(m, csts_state_names[state]);
+ else
+ seq_printf(m, "%d", state);
+ }
+ if (sep)
+ seq_printf(m, "\n");
+ return 0;
+}
+
+static ssize_t nvmet_ctrl_state_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct nvmet_ctrl *ctrl = m->private;
+ char reset[16];
+
+ if (count >= sizeof(reset))
+ return -EINVAL;
+ if (copy_from_user(reset, buf, count))
+ return -EFAULT;
+ if (!memcmp(reset, "fatal", 5))
+ nvmet_ctrl_fatal_error(ctrl);
+ else
+ return -EINVAL;
+ return count;
+}
+NVMET_DEBUGFS_RW_ATTR(nvmet_ctrl_state);
+
+static int nvmet_ctrl_host_traddr_show(struct seq_file *m, void *p)
+{
+ struct nvmet_ctrl *ctrl = m->private;
+ ssize_t size;
+ char buf[NVMF_TRADDR_SIZE + 1];
+
+ size = nvmet_ctrl_host_traddr(ctrl, buf, NVMF_TRADDR_SIZE);
+ if (size < 0) {
+ buf[0] = '\0';
+ size = 0;
+ }
+ buf[size] = '\0';
+ seq_printf(m, "%s\n", buf);
+ return 0;
+}
+NVMET_DEBUGFS_ATTR(nvmet_ctrl_host_traddr);
+
+int nvmet_debugfs_ctrl_setup(struct nvmet_ctrl *ctrl)
+{
+ char name[32];
+ struct dentry *parent = ctrl->subsys->debugfs_dir;
+ int ret;
+
+ if (!parent)
+ return -ENODEV;
+ snprintf(name, sizeof(name), "ctrl%d", ctrl->cntlid);
+ ctrl->debugfs_dir = debugfs_create_dir(name, parent);
+ if (IS_ERR(ctrl->debugfs_dir)) {
+ ret = PTR_ERR(ctrl->debugfs_dir);
+ ctrl->debugfs_dir = NULL;
+ return ret;
+ }
+ debugfs_create_file("port", S_IRUSR, ctrl->debugfs_dir, ctrl,
+ &nvmet_ctrl_port_fops);
+ debugfs_create_file("hostnqn", S_IRUSR, ctrl->debugfs_dir, ctrl,
+ &nvmet_ctrl_hostnqn_fops);
+ debugfs_create_file("kato", S_IRUSR, ctrl->debugfs_dir, ctrl,
+ &nvmet_ctrl_kato_fops);
+ debugfs_create_file("state", S_IRUSR | S_IWUSR, ctrl->debugfs_dir, ctrl,
+ &nvmet_ctrl_state_fops);
+ debugfs_create_file("host_traddr", S_IRUSR, ctrl->debugfs_dir, ctrl,
+ &nvmet_ctrl_host_traddr_fops);
+ return 0;
+}
+
+void nvmet_debugfs_ctrl_free(struct nvmet_ctrl *ctrl)
+{
+ debugfs_remove_recursive(ctrl->debugfs_dir);
+}
+
+int nvmet_debugfs_subsys_setup(struct nvmet_subsys *subsys)
+{
+ int ret = 0;
+
+ subsys->debugfs_dir = debugfs_create_dir(subsys->subsysnqn,
+ nvmet_debugfs);
+ if (IS_ERR(subsys->debugfs_dir)) {
+ ret = PTR_ERR(subsys->debugfs_dir);
+ subsys->debugfs_dir = NULL;
+ }
+ return ret;
+}
+
+void nvmet_debugfs_subsys_free(struct nvmet_subsys *subsys)
+{
+ debugfs_remove_recursive(subsys->debugfs_dir);
+}
+
+int __init nvmet_init_debugfs(void)
+{
+ struct dentry *parent;
+
+ parent = debugfs_create_dir("nvmet", NULL);
+ if (IS_ERR(parent)) {
+ pr_warn("%s: failed to create debugfs directory\n", "nvmet");
+ return PTR_ERR(parent);
+ }
+ nvmet_debugfs = parent;
+ return 0;
+}
+
+void nvmet_exit_debugfs(void)
+{
+ debugfs_remove_recursive(nvmet_debugfs);
+}
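
For readers unfamiliar with the single_open() idiom, NVMET_DEBUGFS_ATTR(nvmet_ctrl_kato) expands to roughly the following (preprocessor output reformatted for readability, not additional code in the patch):

static int nvmet_ctrl_kato_open(struct inode *inode, struct file *file)
{
	return single_open(file, nvmet_ctrl_kato_show, inode->i_private);
}

static const struct file_operations nvmet_ctrl_kato_fops = {
	.open = nvmet_ctrl_kato_open,
	.read = seq_read,
	.release = single_release,
};
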
diff --git a/drivers/nvme/target/debugfs.h b/drivers/nvme/target/debugfs.h
new file mode 100644
index 000000000000..cfb8bbf6a297
--- /dev/null
+++ b/drivers/nvme/target/debugfs.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DebugFS interface for the NVMe target.
+ * Copyright (c) 2022-2024 Shadow
+ * Copyright (c) 2024 SUSE LLC
+ */
+#ifndef NVMET_DEBUGFS_H
+#define NVMET_DEBUGFS_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_NVME_TARGET_DEBUGFS
+int nvmet_debugfs_subsys_setup(struct nvmet_subsys *subsys);
+void nvmet_debugfs_subsys_free(struct nvmet_subsys *subsys);
+int nvmet_debugfs_ctrl_setup(struct nvmet_ctrl *ctrl);
+void nvmet_debugfs_ctrl_free(struct nvmet_ctrl *ctrl);
+
+int __init nvmet_init_debugfs(void);
+void nvmet_exit_debugfs(void);
+#else
+static inline int nvmet_debugfs_subsys_setup(struct nvmet_subsys *subsys)
+{
+ return 0;
+}
+static inline void nvmet_debugfs_subsys_free(struct nvmet_subsys *subsys) {}
+
+static inline int nvmet_debugfs_ctrl_setup(struct nvmet_ctrl *ctrl)
+{
+ return 0;
+}
+static inline void nvmet_debugfs_ctrl_free(struct nvmet_ctrl *ctrl) {}
+
+static inline int __init nvmet_init_debugfs(void)
+{
+ return 0;
+}
+
+static inline void nvmet_exit_debugfs(void) {}
+
+#endif
+
+#endif /* NVMET_DEBUGFS_H */
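
The files land under the debugfs mount, one directory per subsystem NQN with a ctrl<cntlid> directory per controller. A minimal userspace sketch reading a controller's state file; the path is an assumption based on the directory names created above and the default debugfs mount point:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical path: substitute the real subsystem NQN and cntlid */
	int fd = open("/sys/kernel/debug/nvmet/<subsysnqn>/ctrl1/state",
		      O_RDONLY);
	char buf[64];
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("csts: %s", buf);	/* e.g. "ready" */
	}
	close(fd);
	return 0;
}
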
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 668d257fa986..28843df5fa7c 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -21,7 +21,7 @@ static void __nvmet_disc_changed(struct nvmet_port *port,
if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
return;
- nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+ nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}
@@ -179,7 +179,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
req->error_loc =
offsetof(struct nvme_get_log_page_command, lid);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto out;
}
@@ -187,7 +187,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
if (offset & 0x3) {
req->error_loc =
offsetof(struct nvme_get_log_page_command, lpo);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto out;
}
@@ -256,7 +256,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
req->error_loc = offsetof(struct nvme_identify, cns);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto out;
}
@@ -282,7 +282,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
id->lpa = (1 << 2);
/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
- id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));
id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
if (ctrl->ops->flags & NVMF_KEYED_SGLS)
@@ -320,7 +320,7 @@ static void nvmet_execute_disc_set_features(struct nvmet_req *req)
default:
req->error_loc =
offsetof(struct nvme_common_command, cdw10);
- stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
break;
}
@@ -345,7 +345,7 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req)
default:
req->error_loc =
offsetof(struct nvme_common_command, cdw10);
- stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
break;
}
@@ -361,7 +361,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
cmd->common.opcode);
req->error_loc =
offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
switch (cmd->common.opcode) {
@@ -386,7 +386,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
default:
pr_debug("unhandled cmd %d\n", cmd->common.opcode);
req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
}
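
The NVME_SC_DNR -> NVME_STATUS_DNR conversions throughout this series are a pure rename; the Do Not Retry bit itself is unchanged. A sketch of the encoding, with values assumed to match include/linux/nvme.h (DNR is bit 14 of the status field as stored here):

#include <stdint.h>
#include <stdio.h>

#define NVME_SC_INVALID_OPCODE	0x0001
#define NVME_STATUS_DNR		0x4000	/* Do Not Retry */

int main(void)
{
	uint16_t status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;

	/* prints "status=0x4001 dnr=1" */
	printf("status=0x%04x dnr=%d\n", status,
	       !!(status & NVME_STATUS_DNR));
	return 0;
}
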
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index 586458f765f1..3f2857c17d95 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -31,7 +31,7 @@ void nvmet_auth_sq_init(struct nvmet_sq *sq)
sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
}
-static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
+static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmf_auth_dhchap_negotiate_data *data = d;
@@ -109,7 +109,7 @@ static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
return 0;
}
-static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
+static u8 nvmet_auth_reply(struct nvmet_req *req, void *d)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmf_auth_dhchap_reply_data *data = d;
@@ -163,16 +163,16 @@ static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
__func__, ctrl->cntlid, req->sq->qid, data->hl,
req->sq->dhchap_c2);
- req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
} else {
req->sq->authenticated = true;
req->sq->dhchap_c2 = NULL;
}
+ req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
return 0;
}
-static u16 nvmet_auth_failure2(void *d)
+static u8 nvmet_auth_failure2(void *d)
{
struct nvmf_auth_dhchap_failure_data *data = d;
@@ -186,28 +186,29 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
void *d;
u32 tl;
u16 status = 0;
+ u8 dhchap_status;
if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
offsetof(struct nvmf_auth_send_command, secp);
goto done;
}
if (req->cmd->auth_send.spsp0 != 0x01) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
offsetof(struct nvmf_auth_send_command, spsp0);
goto done;
}
if (req->cmd->auth_send.spsp1 != 0x01) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
offsetof(struct nvmf_auth_send_command, spsp1);
goto done;
}
tl = le32_to_cpu(req->cmd->auth_send.tl);
if (!tl) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
offsetof(struct nvmf_auth_send_command, tl);
goto done;
@@ -237,30 +238,32 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
/* Restart negotiation */
- pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__,
- ctrl->cntlid, req->sq->qid);
+ pr_debug("%s: ctrl %d qid %d reset negotiation\n",
+ __func__, ctrl->cntlid, req->sq->qid);
if (!req->sq->qid) {
- if (nvmet_setup_auth(ctrl) < 0) {
- status = NVME_SC_INTERNAL;
- pr_err("ctrl %d qid 0 failed to setup"
- "re-authentication",
+ dhchap_status = nvmet_setup_auth(ctrl);
+ if (dhchap_status) {
+ pr_err("ctrl %d qid 0 failed to setup re-authentication\n",
ctrl->cntlid);
- goto done_failure1;
+ req->sq->dhchap_status = dhchap_status;
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ goto done_kfree;
}
}
- req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
} else if (data->auth_id != req->sq->dhchap_step)
goto done_failure1;
/* Validate negotiation parameters */
- status = nvmet_auth_negotiate(req, d);
- if (status == 0)
+ dhchap_status = nvmet_auth_negotiate(req, d);
+ if (dhchap_status == 0)
req->sq->dhchap_step =
NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
else {
req->sq->dhchap_step =
NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
- req->sq->dhchap_status = status;
- status = 0;
+ req->sq->dhchap_status = dhchap_status;
}
goto done_kfree;
}
@@ -284,15 +287,14 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
switch (data->auth_id) {
case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
- status = nvmet_auth_reply(req, d);
- if (status == 0)
+ dhchap_status = nvmet_auth_reply(req, d);
+ if (dhchap_status == 0)
req->sq->dhchap_step =
NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
else {
req->sq->dhchap_step =
NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
- req->sq->dhchap_status = status;
- status = 0;
+ req->sq->dhchap_status = dhchap_status;
}
goto done_kfree;
case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
@@ -301,13 +303,12 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
__func__, ctrl->cntlid, req->sq->qid);
goto done_kfree;
case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
- status = nvmet_auth_failure2(d);
- if (status) {
+ dhchap_status = nvmet_auth_failure2(d);
+ if (dhchap_status) {
pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
- ctrl->cntlid, req->sq->qid, status);
- req->sq->dhchap_status = status;
+ ctrl->cntlid, req->sq->qid, dhchap_status);
+ req->sq->dhchap_status = dhchap_status;
req->sq->authenticated = false;
- status = 0;
}
goto done_kfree;
default:
@@ -332,20 +333,21 @@ done:
pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
__func__, ctrl->cntlid, req->sq->qid,
status, req->error_loc);
- req->cqe->result.u64 = 0;
- nvmet_req_complete(req, status);
if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
mod_delayed_work(system_wq, &req->sq->auth_expired_work,
auth_expire_secs * HZ);
- return;
+ goto complete;
}
/* Final states, clear up variables */
nvmet_auth_sq_free(req->sq);
if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
nvmet_ctrl_fatal_error(ctrl);
+
+complete:
+ nvmet_req_complete(req, status);
}
static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
@@ -435,26 +437,26 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
u16 status = 0;
if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
offsetof(struct nvmf_auth_receive_command, secp);
goto done;
}
if (req->cmd->auth_receive.spsp0 != 0x01) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
offsetof(struct nvmf_auth_receive_command, spsp0);
goto done;
}
if (req->cmd->auth_receive.spsp1 != 0x01) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
offsetof(struct nvmf_auth_receive_command, spsp1);
goto done;
}
al = le32_to_cpu(req->cmd->auth_receive.al);
if (!al) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
offsetof(struct nvmf_auth_receive_command, al);
goto done;
@@ -513,12 +515,11 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
status = nvmet_copy_to_sgl(req, 0, d, al);
kfree(d);
done:
- req->cqe->result.u64 = 0;
- nvmet_req_complete(req, status);
if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
nvmet_auth_sq_free(req->sq);
else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
nvmet_auth_sq_free(req->sq);
nvmet_ctrl_fatal_error(ctrl);
}
+ nvmet_req_complete(req, status);
}
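
The rework above separates two status domains the old code conflated in one variable: the u16 NVMe completion status returned in the CQE, and the u8 DH-HMAC-CHAP failure reason carried inside the AUTH_Send/AUTH_Receive payload. A sketch of the distinction, with the failure-reason value assumed from the NVMe-oF authentication spec:

#include <stdint.h>
#include <stdio.h>

#define NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE	0x04	/* assumed value */

struct auth_result {
	uint16_t nvme_status;	/* goes into the CQE status field */
	uint8_t dhchap_status;	/* goes into the DH-CHAP FAILURE1 message */
};

int main(void)
{
	/* a protocol-level failure: the command itself still completes */
	struct auth_result r = {
		.nvme_status = 0,
		.dhchap_status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE,
	};

	printf("cqe=0x%04x dhchap=0x%02x\n", r.nvme_status, r.dhchap_status);
	return 0;
}
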
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 43b5bd8bb6a5..c4b2eddd5666 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -18,7 +18,7 @@ static void nvmet_execute_prop_set(struct nvmet_req *req)
if (req->cmd->prop_set.attrib & 1) {
req->error_loc =
offsetof(struct nvmf_property_set_command, attrib);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto out;
}
@@ -29,7 +29,7 @@ static void nvmet_execute_prop_set(struct nvmet_req *req)
default:
req->error_loc =
offsetof(struct nvmf_property_set_command, offset);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
out:
nvmet_req_complete(req, status);
@@ -50,7 +50,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
val = ctrl->cap;
break;
default:
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
break;
}
} else {
@@ -65,7 +65,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
val = ctrl->csts;
break;
default:
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
break;
}
}
@@ -105,7 +105,7 @@ u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req)
pr_debug("received unknown capsule type 0x%x\n",
cmd->fabrics.fctype);
req->error_loc = offsetof(struct nvmf_common_command, fctype);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
return 0;
@@ -128,7 +128,7 @@ u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req)
pr_debug("received unknown capsule type 0x%x\n",
cmd->fabrics.fctype);
req->error_loc = offsetof(struct nvmf_common_command, fctype);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
return 0;
@@ -147,29 +147,30 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
pr_warn("queue size zero!\n");
req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
- ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
goto err;
}
if (ctrl->sqs[qid] != NULL) {
pr_warn("qid %u has already been created\n", qid);
req->error_loc = offsetof(struct nvmf_connect_command, qid);
- return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
}
- if (sqsize > mqes) {
+	/* for fabrics, this value applies only to the I/O Submission Queues */
+ if (qid && sqsize > mqes) {
pr_warn("sqsize %u is larger than MQES supported %u cntlid %d\n",
sqsize, mqes, ctrl->cntlid);
req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
- return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ return NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
}
old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
if (old) {
pr_warn("queue already connected!\n");
req->error_loc = offsetof(struct nvmf_connect_command, opcode);
- return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
+ return NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
}
/* note: convert queue size from 0's-based value to 1's-based value */
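
Both SQSIZE in the connect command and MQES in CAP are 0's-based, and only I/O queues are checked against MQES here, since on fabrics the admin queue is not bound by it. A userspace model of the validation above:

#include <stdbool.h>
#include <stdio.h>

static bool sqsize_ok(unsigned int qid, unsigned int sqsize,
		      unsigned int mqes)
{
	if (!sqsize)			/* 0's-based: zero is never valid */
		return false;
	if (qid && sqsize > mqes)	/* only I/O queues are bound by MQES */
		return false;
	return true;
}

int main(void)
{
	/* prints "1 0 1": admin ok, oversized I/O rejected, sized I/O ok */
	printf("%d %d %d\n", sqsize_ok(0, 31, 15), sqsize_ok(1, 31, 15),
	       sqsize_ok(1, 15, 15));
	return 0;
}
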
@@ -209,8 +210,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
struct nvmf_connect_command *c = &req->cmd->connect;
struct nvmf_connect_data *d;
struct nvmet_ctrl *ctrl = NULL;
- u16 status = 0;
- int ret;
+ u16 status;
+ u8 dhchap_status;
if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
return;
@@ -225,40 +226,38 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
if (status)
goto out;
- /* zero out initial completion result, assign values as needed */
- req->cqe->result.u32 = 0;
-
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
le16_to_cpu(c->recfmt));
req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
- status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
+ status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
goto out;
}
if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
pr_warn("connect attempt for invalid controller ID %#x\n",
d->cntlid);
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
goto out;
}
+ d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
le32_to_cpu(c->kato), &ctrl);
if (status)
goto out;
- ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
-
uuid_copy(&ctrl->hostid, &d->hostid);
- ret = nvmet_setup_auth(ctrl);
- if (ret < 0) {
- pr_err("Failed to setup authentication, error %d\n", ret);
+ dhchap_status = nvmet_setup_auth(ctrl);
+ if (dhchap_status) {
+ pr_err("Failed to setup authentication, dhchap status %u\n",
+ dhchap_status);
nvmet_ctrl_put(ctrl);
- if (ret == -EPERM)
- status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR);
+ if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED)
+ status = (NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR);
else
status = NVME_SC_INTERNAL;
goto out;
@@ -288,7 +287,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
struct nvmf_connect_data *d;
struct nvmet_ctrl *ctrl;
u16 qid = le16_to_cpu(c->qid);
- u16 status = 0;
+ u16 status;
if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
return;
@@ -303,26 +302,25 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
if (status)
goto out;
- /* zero out initial completion result, assign values as needed */
- req->cqe->result.u32 = 0;
-
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
le16_to_cpu(c->recfmt));
- status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
+ status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
goto out;
}
+ d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
le16_to_cpu(d->cntlid), req);
if (!ctrl) {
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
goto out;
}
if (unlikely(qid > ctrl->subsys->max_qid)) {
pr_warn("invalid queue id (%d)\n", qid);
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
goto out_ctrl_put;
}
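
The terminating assignments added to both connect paths matter because the connect data arrives from the wire and the NQN fields are fixed-size, not guaranteed NUL-terminated. A sketch of the failure mode being closed, assuming the usual NVMF_NQN_FIELD_LEN of 256:

#include <stdio.h>
#include <string.h>

#define NVMF_NQN_FIELD_LEN 256

/* force termination so later strcmp()/strlen() cannot run past the field */
static void sanitize_nqn(char nqn[NVMF_NQN_FIELD_LEN])
{
	nqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
}

int main(void)
{
	char nqn[NVMF_NQN_FIELD_LEN];

	memset(nqn, 'A', sizeof(nqn));	/* hostile, unterminated input */
	sanitize_nqn(nqn);
	printf("len=%zu\n", strlen(nqn));	/* prints "len=255": bounded */
	return 0;
}
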
@@ -352,13 +350,13 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
pr_debug("invalid command 0x%x on unconnected queue.\n",
cmd->fabrics.opcode);
req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
pr_debug("invalid capsule type 0x%x on unconnected queue.\n",
cmd->fabrics.fctype);
req->error_loc = offsetof(struct nvmf_common_command, fctype);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
if (cmd->connect.qid == 0)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 1ab6601fdd5c..3ef4beacde32 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -111,6 +111,8 @@ struct nvmet_fc_tgtport {
struct nvmet_fc_port_entry *pe;
struct kref ref;
u32 max_sg_cnt;
+
+ struct work_struct put_work;
};
struct nvmet_fc_port_entry {
@@ -145,8 +147,8 @@ struct nvmet_fc_tgt_queue {
struct list_head avail_defer_list;
struct workqueue_struct *work_q;
struct kref ref;
- struct rcu_head rcu;
- struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */
+ /* array of fcp_iods */
+ struct nvmet_fc_fcp_iod fod[] /* __counted_by(sqsize) */;
} __aligned(sizeof(unsigned long long));
struct nvmet_fc_hostport {
@@ -165,10 +167,9 @@ struct nvmet_fc_tgt_assoc {
struct nvmet_fc_hostport *hostport;
struct nvmet_fc_ls_iod *rcv_disconn;
struct list_head a_list;
- struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
+ struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
struct kref ref;
struct work_struct del_work;
- struct rcu_head rcu;
};
@@ -248,6 +249,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
+static void nvmet_fc_put_tgtport_work(struct work_struct *work)
+{
+ struct nvmet_fc_tgtport *tgtport =
+ container_of(work, struct nvmet_fc_tgtport, put_work);
+
+ nvmet_fc_tgtport_put(tgtport);
+}
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod);
@@ -359,7 +367,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
if (!lsop->req_queued) {
spin_unlock_irqrestore(&tgtport->lock, flags);
- return;
+ goto out_putwork;
}
list_del(&lsop->lsreq_list);
@@ -372,7 +380,8 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
(lsreq->rqstlen + lsreq->rsplen),
DMA_BIDIRECTIONAL);
- nvmet_fc_tgtport_put(tgtport);
+out_putwork:
+ queue_work(nvmet_wq, &tgtport->put_work);
}
static int
@@ -488,8 +497,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
* message is normal. Otherwise, send unless the hostport has
* already been invalidated by the lldd.
*/
- if (!tgtport->ops->ls_req || !assoc->hostport ||
- assoc->hostport->invalid)
+ if (!tgtport->ops->ls_req || assoc->hostport->invalid)
return;
lsop = kzalloc((sizeof(*lsop) +
@@ -801,14 +809,11 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
if (!queue)
return NULL;
- if (!nvmet_fc_tgt_a_get(assoc))
- goto out_free_queue;
-
queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
assoc->tgtport->fc_target_port.port_num,
assoc->a_id, qid);
if (!queue->work_q)
- goto out_a_put;
+ goto out_free_queue;
queue->qid = qid;
queue->sqsize = sqsize;
@@ -830,15 +835,13 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
goto out_fail_iodlist;
WARN_ON(assoc->queues[qid]);
- rcu_assign_pointer(assoc->queues[qid], queue);
+ assoc->queues[qid] = queue;
return queue;
out_fail_iodlist:
nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
destroy_workqueue(queue->work_q);
-out_a_put:
- nvmet_fc_tgt_a_put(assoc);
out_free_queue:
kfree(queue);
return NULL;
@@ -851,15 +854,11 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
struct nvmet_fc_tgt_queue *queue =
container_of(ref, struct nvmet_fc_tgt_queue, ref);
- rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
-
nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
- nvmet_fc_tgt_a_put(queue->assoc);
-
destroy_workqueue(queue->work_q);
- kfree_rcu(queue, rcu);
+ kfree(queue);
}
static void
@@ -968,7 +967,7 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
rcu_read_lock();
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
- queue = rcu_dereference(assoc->queues[qid]);
+ queue = assoc->queues[qid];
if (queue &&
(!atomic_read(&queue->connected) ||
!nvmet_fc_tgt_q_get(queue)))
@@ -1030,7 +1029,7 @@ nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
list_for_each_entry(host, &tgtport->host_list, host_list) {
if (host->hosthandle == hosthandle && !host->invalid) {
if (nvmet_fc_hostport_get(host))
- return (host);
+ return host;
}
}
@@ -1077,8 +1076,6 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
/* new allocation not needed */
kfree(newhost);
newhost = match;
- /* no new allocation - release reference */
- nvmet_fc_tgtport_put(tgtport);
} else {
newhost->tgtport = tgtport;
newhost->hosthandle = hosthandle;
@@ -1093,23 +1090,59 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
}
static void
-nvmet_fc_delete_assoc(struct work_struct *work)
+nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+ nvmet_fc_delete_target_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
+}
+
+static void
+nvmet_fc_delete_assoc_work(struct work_struct *work)
{
struct nvmet_fc_tgt_assoc *assoc =
container_of(work, struct nvmet_fc_tgt_assoc, del_work);
+ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
- nvmet_fc_delete_target_assoc(assoc);
- nvmet_fc_tgt_a_put(assoc);
+ nvmet_fc_delete_assoc(assoc);
+ nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+ nvmet_fc_tgtport_get(assoc->tgtport);
+ queue_work(nvmet_wq, &assoc->del_work);
+}
+
+static bool
+nvmet_fc_assoc_exists(struct nvmet_fc_tgtport *tgtport, u64 association_id)
+{
+ struct nvmet_fc_tgt_assoc *a;
+ bool found = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) {
+ if (association_id == a->association_id) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return found;
}
static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
- struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
+ struct nvmet_fc_tgt_assoc *assoc;
unsigned long flags;
+ bool done;
u64 ran;
int idx;
- bool needrandom = true;
+
+ if (!tgtport->pe)
+ return NULL;
assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
if (!assoc)
@@ -1119,43 +1152,33 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
if (idx < 0)
goto out_free_assoc;
- if (!nvmet_fc_tgtport_get(tgtport))
- goto out_ida;
-
assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
if (IS_ERR(assoc->hostport))
- goto out_put;
+ goto out_ida;
assoc->tgtport = tgtport;
assoc->a_id = idx;
INIT_LIST_HEAD(&assoc->a_list);
kref_init(&assoc->ref);
- INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
+ INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work);
atomic_set(&assoc->terminating, 0);
- while (needrandom) {
+ done = false;
+ do {
get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
ran = ran << BYTES_FOR_QID_SHIFT;
spin_lock_irqsave(&tgtport->lock, flags);
- needrandom = false;
- list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
- if (ran == tmpassoc->association_id) {
- needrandom = true;
- break;
- }
- }
- if (!needrandom) {
+ if (!nvmet_fc_assoc_exists(tgtport, ran)) {
assoc->association_id = ran;
list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
+ done = true;
}
spin_unlock_irqrestore(&tgtport->lock, flags);
- }
+ } while (!done);
return assoc;
-out_put:
- nvmet_fc_tgtport_put(tgtport);
out_ida:
ida_free(&tgtport->assoc_cnt, idx);
out_free_assoc:
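
The rewritten loop keeps the original semantics: pick a random 64-bit association id whose low bytes are reserved for the queue id, retrying until it is unique on the tgtport. A userspace model, assuming BYTES_FOR_QID is sizeof(u16) as in the driver:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BYTES_FOR_QID		2	/* sizeof(u16), an assumption */
#define BYTES_FOR_QID_SHIFT	(BYTES_FOR_QID * 8)

static uint64_t ids[16];
static int nr_ids;

static bool id_exists(uint64_t id)
{
	for (int i = 0; i < nr_ids; i++)
		if (ids[i] == id)
			return true;
	return false;
}

int main(void)
{
	uint64_t ran;

	do {	/* low BYTES_FOR_QID bytes stay zero for the qid */
		ran = ((uint64_t)rand() << 32 | (uint32_t)rand())
			<< BYTES_FOR_QID_SHIFT;
	} while (id_exists(ran));
	ids[nr_ids++] = ran;
	printf("association id 0x%016llx\n", (unsigned long long)ran);
	return 0;
}
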
@@ -1171,13 +1194,18 @@ nvmet_fc_target_assoc_free(struct kref *ref)
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
struct nvmet_fc_ls_iod *oldls;
unsigned long flags;
+ int i;
+
+ for (i = NVMET_NR_QUEUES; i >= 0; i--) {
+ if (assoc->queues[i])
+ nvmet_fc_delete_target_queue(assoc->queues[i]);
+ }
/* Send Disconnect now that all i/o has completed */
nvmet_fc_xmt_disconnect_assoc(assoc);
nvmet_fc_free_hostport(assoc->hostport);
spin_lock_irqsave(&tgtport->lock, flags);
- list_del_rcu(&assoc->a_list);
oldls = assoc->rcv_disconn;
spin_unlock_irqrestore(&tgtport->lock, flags);
/* if pending Rcv Disconnect Association LS, send rsp now */
@@ -1187,8 +1215,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
dev_info(tgtport->dev,
"{%d:%d} Association freed\n",
tgtport->fc_target_port.port_num, assoc->a_id);
- kfree_rcu(assoc, rcu);
- nvmet_fc_tgtport_put(tgtport);
+ kfree(assoc);
}
static void
@@ -1207,7 +1234,7 @@ static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
- struct nvmet_fc_tgt_queue *queue;
+ unsigned long flags;
int i, terminating;
terminating = atomic_xchg(&assoc->terminating, 1);
@@ -1216,29 +1243,21 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
if (terminating)
return;
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_del_rcu(&assoc->a_list);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
- for (i = NVMET_NR_QUEUES; i >= 0; i--) {
- rcu_read_lock();
- queue = rcu_dereference(assoc->queues[i]);
- if (!queue) {
- rcu_read_unlock();
- continue;
- }
+ synchronize_rcu();
- if (!nvmet_fc_tgt_q_get(queue)) {
- rcu_read_unlock();
- continue;
- }
- rcu_read_unlock();
- nvmet_fc_delete_target_queue(queue);
- nvmet_fc_tgt_q_put(queue);
+ /* ensure all in-flight I/Os have been processed */
+ for (i = NVMET_NR_QUEUES; i >= 0; i--) {
+ if (assoc->queues[i])
+ flush_workqueue(assoc->queues[i]->work_q);
}
dev_info(tgtport->dev,
"{%d:%d} Association deleted\n",
tgtport->fc_target_port.port_num, assoc->a_id);
-
- nvmet_fc_tgt_a_put(assoc);
}
static struct nvmet_fc_tgt_assoc *
@@ -1414,6 +1433,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
kref_init(&newrec->ref);
ida_init(&newrec->assoc_cnt);
newrec->max_sg_cnt = template->max_sgl_segments;
+ INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
ret = nvmet_fc_alloc_ls_iodlist(newrec);
if (ret) {
@@ -1491,9 +1511,8 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (!nvmet_fc_tgt_a_get(assoc))
continue;
- if (!queue_work(nvmet_wq, &assoc->del_work))
- /* already deleting - release local reference */
- nvmet_fc_tgt_a_put(assoc);
+ nvmet_fc_schedule_delete_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
}
rcu_read_unlock();
}
@@ -1539,16 +1558,14 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
spin_lock_irqsave(&tgtport->lock, flags);
list_for_each_entry_safe(assoc, next,
&tgtport->assoc_list, a_list) {
- if (!assoc->hostport ||
- assoc->hostport->hosthandle != hosthandle)
+ if (assoc->hostport->hosthandle != hosthandle)
continue;
if (!nvmet_fc_tgt_a_get(assoc))
continue;
assoc->hostport->invalid = 1;
noassoc = false;
- if (!queue_work(nvmet_wq, &assoc->del_work))
- /* already deleting - release local reference */
- nvmet_fc_tgt_a_put(assoc);
+ nvmet_fc_schedule_delete_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
}
spin_unlock_irqrestore(&tgtport->lock, flags);
@@ -1580,7 +1597,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
rcu_read_lock();
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
- queue = rcu_dereference(assoc->queues[0]);
+ queue = assoc->queues[0];
if (queue && queue->nvme_sq.ctrl == ctrl) {
if (nvmet_fc_tgt_a_get(assoc))
found_ctrl = true;
@@ -1592,9 +1609,8 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
nvmet_fc_tgtport_put(tgtport);
if (found_ctrl) {
- if (!queue_work(nvmet_wq, &assoc->del_work))
- /* already deleting - release local reference */
- nvmet_fc_tgt_a_put(assoc);
+ nvmet_fc_schedule_delete_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
return;
}
@@ -1624,6 +1640,8 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
/* terminate any outstanding associations */
__nvmet_fc_free_assocs(tgtport);
+ flush_workqueue(nvmet_wq);
+
/*
* should terminate LS's as well. However, LS's will be generated
* at the tail end of association termination, so they likely don't
@@ -1869,9 +1887,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
FCNVME_LS_DISCONNECT_ASSOC);
- /* release get taken in nvmet_fc_find_target_assoc */
- nvmet_fc_tgt_a_put(assoc);
-
/*
	 * The rules for LS responses say the response cannot
* go back until ABTS's have been sent for all outstanding
@@ -1886,8 +1901,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
assoc->rcv_disconn = iod;
spin_unlock_irqrestore(&tgtport->lock, flags);
- nvmet_fc_delete_target_assoc(assoc);
-
if (oldls) {
dev_info(tgtport->dev,
"{%d:%d} Multiple Disconnect Association LS's "
@@ -1903,6 +1916,9 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
nvmet_fc_xmt_ls_rsp(tgtport, oldls);
}
+ nvmet_fc_schedule_delete_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
+
return false;
}
@@ -2539,8 +2555,9 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
fod->req.cmd = &fod->cmdiubuf.sqe;
fod->req.cqe = &fod->rspiubuf.cqe;
- if (tgtport->pe)
- fod->req.port = tgtport->pe->port;
+ if (!tgtport->pe)
+ goto transport_error;
+ fod->req.port = tgtport->pe->port;
/* clear any response payload */
memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
@@ -2901,6 +2918,9 @@ nvmet_fc_remove_port(struct nvmet_port *port)
nvmet_fc_portentry_unbind(pe);
+ /* terminate any outstanding associations */
+ __nvmet_fc_free_assocs(pe->tgtport);
+
kfree(pe);
}
@@ -2914,6 +2934,38 @@ nvmet_fc_discovery_chg(struct nvmet_port *port)
tgtport->ops->discovery_event(&tgtport->fc_target_port);
}
+static ssize_t
+nvmet_fc_host_traddr(struct nvmet_ctrl *ctrl,
+ char *traddr, size_t traddr_size)
+{
+ struct nvmet_sq *sq = ctrl->sqs[0];
+ struct nvmet_fc_tgt_queue *queue =
+ container_of(sq, struct nvmet_fc_tgt_queue, nvme_sq);
+ struct nvmet_fc_tgtport *tgtport = queue->assoc ? queue->assoc->tgtport : NULL;
+ struct nvmet_fc_hostport *hostport = queue->assoc ? queue->assoc->hostport : NULL;
+ u64 wwnn, wwpn;
+ ssize_t ret = 0;
+
+ if (!tgtport || !nvmet_fc_tgtport_get(tgtport))
+ return -ENODEV;
+ if (!hostport || !nvmet_fc_hostport_get(hostport)) {
+ ret = -ENODEV;
+ goto out_put;
+ }
+
+ if (tgtport->ops->host_traddr) {
+ ret = tgtport->ops->host_traddr(hostport->hosthandle, &wwnn, &wwpn);
+ if (ret)
+ goto out_put_host;
+ ret = snprintf(traddr, traddr_size, "nn-0x%llx:pn-0x%llx", wwnn, wwpn);
+ }
+out_put_host:
+ nvmet_fc_hostport_put(hostport);
+out_put:
+ nvmet_fc_tgtport_put(tgtport);
+ return ret;
+}
+
static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_FC,
@@ -2923,6 +2975,7 @@ static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
.queue_response = nvmet_fc_fcp_nvme_cmd_done,
.delete_ctrl = nvmet_fc_delete_ctrl,
.discovery_chg = nvmet_fc_discovery_chg,
+ .host_traddr = nvmet_fc_host_traddr,
};
static int __init nvmet_fc_init_module(void)
@@ -2932,6 +2985,9 @@ static int __init nvmet_fc_init_module(void)
static void __exit nvmet_fc_exit_module(void)
{
+	/* ensure any shutdown operations, e.g. controller deletion, have finished */
+ flush_workqueue(nvmet_wq);
+
/* sanity check - all lports should be removed */
if (!list_empty(&nvmet_fc_target_list))
pr_warn("%s: targetport list not empty\n", __func__);
@@ -2944,4 +3000,5 @@ static void __exit nvmet_fc_exit_module(void)
module_init(nvmet_fc_init_module);
module_exit(nvmet_fc_exit_module);
+MODULE_DESCRIPTION("NVMe target FC transport driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index c65a73433c05..e1abb27927ff 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -358,7 +358,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
if (!rport->targetport) {
tls_req->status = -ECONNREFUSED;
spin_lock(&rport->lock);
- list_add_tail(&rport->ls_list, &tls_req->ls_list);
+ list_add_tail(&tls_req->ls_list, &rport->ls_list);
spin_unlock(&rport->lock);
queue_work(nvmet_wq, &rport->ls_work);
return ret;
@@ -391,7 +391,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
if (remoteport) {
rport = remoteport->private;
spin_lock(&rport->lock);
- list_add_tail(&rport->ls_list, &tls_req->ls_list);
+ list_add_tail(&tls_req->ls_list, &rport->ls_list);
spin_unlock(&rport->lock);
queue_work(nvmet_wq, &rport->ls_work);
}
@@ -446,7 +446,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
if (!tport->remoteport) {
tls_req->status = -ECONNREFUSED;
spin_lock(&tport->lock);
- list_add_tail(&tport->ls_list, &tls_req->ls_list);
+ list_add_tail(&tls_req->ls_list, &tport->ls_list);
spin_unlock(&tport->lock);
queue_work(nvmet_wq, &tport->ls_work);
return ret;
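
The three hunks above fix swapped list_add_tail() arguments: the signature is list_add_tail(new, head), so the old code spliced the per-port list head onto the request instead of queueing the request. A self-contained illustration using the textbook doubly-linked-list insert:

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* insert "new" before "head", i.e. at the tail of the list */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

int main(void)
{
	struct list_head pending = LIST_HEAD_INIT(pending);
	struct list_head req = LIST_HEAD_INIT(req);

	list_add_tail(&req, &pending);	/* correct argument order */
	printf("queued: %d\n", pending.next == &req);	/* prints 1 */
	return 0;
}
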
@@ -492,6 +492,16 @@ fcloop_t2h_host_release(void *hosthandle)
/* host handle ignored for now */
}
+static int
+fcloop_t2h_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn)
+{
+ struct fcloop_rport *rport = hosthandle;
+
+ *wwnn = rport->lport->localport->node_name;
+ *wwpn = rport->lport->localport->port_name;
+ return 0;
+}
+
/*
 * Simulate reception of an RSCN and convert it to an initiator transport
 * call to rescan a remote port.
@@ -995,11 +1005,6 @@ fcloop_nport_free(struct kref *ref)
{
struct fcloop_nport *nport =
container_of(ref, struct fcloop_nport, ref);
- unsigned long flags;
-
- spin_lock_irqsave(&fcloop_lock, flags);
- list_del(&nport->nport_list);
- spin_unlock_irqrestore(&fcloop_lock, flags);
kfree(nport);
}
@@ -1079,6 +1084,7 @@ static struct nvmet_fc_target_template tgttemplate = {
.ls_req = fcloop_t2h_ls_req,
.ls_abort = fcloop_t2h_ls_abort,
.host_release = fcloop_t2h_host_release,
+ .host_traddr = fcloop_t2h_host_traddr,
.max_hw_queues = FCLOOP_HW_QUEUES,
.max_sgl_segments = FCLOOP_SGL_SEGS,
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
@@ -1357,6 +1363,8 @@ __unlink_remote_port(struct fcloop_nport *nport)
nport->tport->remoteport = NULL;
nport->rport = NULL;
+ list_del(&nport->nport_list);
+
return rport;
}
@@ -1559,7 +1567,9 @@ static const struct attribute_group *fcloop_dev_attr_groups[] = {
NULL,
};
-static struct class *fcloop_class;
+static const struct class fcloop_class = {
+ .name = "fcloop",
+};
static struct device *fcloop_device;
@@ -1567,15 +1577,14 @@ static int __init fcloop_init(void)
{
int ret;
- fcloop_class = class_create("fcloop");
- if (IS_ERR(fcloop_class)) {
+ ret = class_register(&fcloop_class);
+ if (ret) {
pr_err("couldn't register class fcloop\n");
- ret = PTR_ERR(fcloop_class);
return ret;
}
fcloop_device = device_create_with_groups(
- fcloop_class, NULL, MKDEV(0, 0), NULL,
+ &fcloop_class, NULL, MKDEV(0, 0), NULL,
fcloop_dev_attr_groups, "ctl");
if (IS_ERR(fcloop_device)) {
pr_err("couldn't create ctl device!\n");
@@ -1588,7 +1597,7 @@ static int __init fcloop_init(void)
return 0;
out_destroy_class:
- class_destroy(fcloop_class);
+ class_unregister(&fcloop_class);
return ret;
}
@@ -1646,11 +1655,12 @@ static void __exit fcloop_exit(void)
put_device(fcloop_device);
- device_destroy(fcloop_class, MKDEV(0, 0));
- class_destroy(fcloop_class);
+ device_destroy(&fcloop_class, MKDEV(0, 0));
+ class_unregister(&fcloop_class);
}
module_init(fcloop_init);
module_exit(fcloop_exit);
+MODULE_DESCRIPTION("NVMe target FC loop transport driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 2733e0158585..0bda83d0fc3e 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -50,9 +50,10 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
- if (ns->bdev) {
- blkdev_put(ns->bdev, NULL);
+ if (ns->bdev_file) {
+ fput(ns->bdev_file);
ns->bdev = NULL;
+ ns->bdev_file = NULL;
}
}
@@ -60,15 +61,17 @@ static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
{
struct blk_integrity *bi = bdev_get_integrity(ns->bdev);
- if (bi) {
+ if (!bi)
+ return;
+
+ if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC) {
ns->metadata_size = bi->tuple_size;
- if (bi->profile == &t10_pi_type1_crc)
+ if (bi->flags & BLK_INTEGRITY_REF_TAG)
ns->pi_type = NVME_NS_DPS_PI_TYPE1;
- else if (bi->profile == &t10_pi_type3_crc)
- ns->pi_type = NVME_NS_DPS_PI_TYPE3;
else
- /* Unsupported metadata type */
- ns->metadata_size = 0;
+ ns->pi_type = NVME_NS_DPS_PI_TYPE3;
+ } else {
+ ns->metadata_size = 0;
}
}
@@ -84,23 +87,24 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
if (ns->buffered_io)
return -ENOTBLK;
- ns->bdev = blkdev_get_by_path(ns->device_path,
- BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL);
- if (IS_ERR(ns->bdev)) {
- ret = PTR_ERR(ns->bdev);
+ ns->bdev_file = bdev_file_open_by_path(ns->device_path,
+ BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL);
+ if (IS_ERR(ns->bdev_file)) {
+ ret = PTR_ERR(ns->bdev_file);
if (ret != -ENOTBLK) {
- pr_err("failed to open block device %s: (%ld)\n",
- ns->device_path, PTR_ERR(ns->bdev));
+ pr_err("failed to open block device %s: (%d)\n",
+ ns->device_path, ret);
}
- ns->bdev = NULL;
+ ns->bdev_file = NULL;
return ret;
}
+ ns->bdev = file_bdev(ns->bdev_file);
ns->size = bdev_nr_bytes(ns->bdev);
ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
ns->pi_type = 0;
ns->metadata_size = 0;
- if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10))
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
nvmet_bdev_ns_enable_integrity(ns);
if (bdev_is_zoned(ns->bdev)) {
@@ -133,11 +137,11 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
*/
switch (blk_sts) {
case BLK_STS_NOSPC:
- status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
+ status = NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR;
req->error_loc = offsetof(struct nvme_rw_command, length);
break;
case BLK_STS_TARGET:
- status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
req->error_loc = offsetof(struct nvme_rw_command, slba);
break;
case BLK_STS_NOTSUPP:
@@ -145,10 +149,10 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
switch (req->cmd->common.opcode) {
case nvme_cmd_dsm:
case nvme_cmd_write_zeroes:
- status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
+ status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR;
break;
default:
- status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
break;
case BLK_STS_MEDIUM:
@@ -157,7 +161,7 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
break;
case BLK_STS_IOERR:
default:
- status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
req->error_loc = offsetof(struct nvme_common_command, opcode);
}
@@ -206,12 +210,11 @@ static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
return PTR_ERR(bip);
}
- bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
/* virtual start sector must be in integrity interval units */
bip_set_seed(bip, bio->bi_iter.bi_sector >>
(bi->interval_exp - SECTOR_SHIFT));
- resid = bip->bip_iter.bi_size;
+ resid = bio_integrity_bytes(bi, bio_sectors(bio));
while (resid > 0 && sg_miter_next(miter)) {
len = min_t(size_t, miter->length, resid);
rc = bio_integrity_add_page(bio, miter->page, len,
@@ -355,7 +358,7 @@ u16 nvmet_bdev_flush(struct nvmet_req *req)
return 0;
if (blkdev_issue_flush(req->ns->bdev))
- return NVME_SC_INTERNAL | NVME_SC_DNR;
+ return NVME_SC_INTERNAL | NVME_STATUS_DNR;
return 0;
}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 48d5df054cd0..e32790d8fc26 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -400,7 +400,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
}
nvme_quiesce_admin_queue(&ctrl->ctrl);
- if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE)
nvme_disable_ctrl(&ctrl->ctrl, true);
nvme_cancel_admin_tagset(&ctrl->ctrl);
@@ -434,8 +434,10 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
nvme_loop_shutdown_ctrl(ctrl);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
- if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
- ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
+ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
+ if (state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DELETING_NOIO)
/* state change failure for non-deleted ctrl? */
WARN_ON_ONCE(1);
return;
@@ -466,6 +468,8 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
out_destroy_io:
nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
+ nvme_quiesce_admin_queue(&ctrl->ctrl);
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
nvme_loop_destroy_admin_queue(ctrl);
out_disable:
dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
@@ -551,6 +555,10 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
goto out;
}
+ ret = nvme_add_ctrl(&ctrl->ctrl);
+ if (ret)
+ goto out_put_ctrl;
+
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
WARN_ON_ONCE(1);
@@ -600,11 +608,14 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
return &ctrl->ctrl;
out_remove_admin_queue:
+ nvme_quiesce_admin_queue(&ctrl->ctrl);
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
kfree(ctrl->queues);
out_uninit_ctrl:
nvme_uninit_ctrl(&ctrl->ctrl);
+out_put_ctrl:
nvme_put_ctrl(&ctrl->ctrl);
out:
if (ret > 0)
@@ -684,5 +695,6 @@ static void __exit nvme_loop_cleanup_module(void)
module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);
+MODULE_DESCRIPTION("NVMe target loop transport driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 8cfd60f3b564..190f55e6d753 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -58,6 +58,7 @@
struct nvmet_ns {
struct percpu_ref ref;
+ struct file *bdev_file;
struct block_device *bdev;
struct file *file;
bool readonly;
@@ -112,8 +113,8 @@ struct nvmet_sq {
bool authenticated;
struct delayed_work auth_expired_work;
u16 dhchap_tid;
- u16 dhchap_status;
- int dhchap_step;
+ u8 dhchap_status;
+ u8 dhchap_step;
u8 *dhchap_c1;
u8 *dhchap_c2;
u32 dhchap_s1;
@@ -158,9 +159,11 @@ struct nvmet_port {
struct config_group ana_groups_group;
struct nvmet_ana_group ana_default_group;
enum nvme_ana_state *ana_state;
+ struct key *keyring;
void *priv;
bool enabled;
int inline_data_size;
+ int max_queue_size;
const struct nvmet_fabrics_ops *tr_ops;
bool pi_enable;
};
@@ -178,6 +181,16 @@ static inline struct nvmet_port *ana_groups_to_port(
ana_groups_group);
}
+static inline u8 nvmet_port_disc_addr_treq_secure_channel(struct nvmet_port *port)
+{
+ return (port->disc_addr.treq & NVME_TREQ_SECURE_CHANNEL_MASK);
+}
+
+static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port)
+{
+ return nvmet_port_disc_addr_treq_secure_channel(port) == NVMF_TREQ_REQUIRED;
+}
+
struct nvmet_ctrl {
struct nvmet_subsys *subsys;
struct nvmet_sq **sqs;
@@ -217,7 +230,9 @@ struct nvmet_ctrl {
struct device *p2p_client;
struct radix_tree_root p2p_ns_map;
-
+#ifdef CONFIG_NVME_TARGET_DEBUGFS
+ struct dentry *debugfs_dir;
+#endif
spinlock_t error_lock;
u64 err_counter;
struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
@@ -249,7 +264,9 @@ struct nvmet_subsys {
struct list_head hosts;
bool allow_any_host;
-
+#ifdef CONFIG_NVME_TARGET_DEBUGFS
+ struct dentry *debugfs_dir;
+#endif
u16 max_qid;
u64 ver;
@@ -337,6 +354,8 @@ struct nvmet_fabrics_ops {
void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
void (*disc_traddr)(struct nvmet_req *req,
struct nvmet_port *port, char *traddr);
+ ssize_t (*host_traddr)(struct nvmet_ctrl *ctrl,
+ char *traddr, size_t traddr_len);
u16 (*install_queue)(struct nvmet_sq *nvme_sq);
void (*discovery_chg)(struct nvmet_port *port);
u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
@@ -485,6 +504,8 @@ struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req);
+ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl,
+ char *traddr, size_t traddr_len);
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
enum nvme_subsys_type type);
@@ -530,10 +551,12 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
u8 event_info, u8 log_page);
+bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);
-#define NVMET_QUEUE_SIZE 1024
+#define NVMET_MIN_QUEUE_SIZE 16
+#define NVMET_MAX_QUEUE_SIZE 1024
#define NVMET_NR_QUEUES 128
-#define NVMET_MAX_CMD NVMET_QUEUE_SIZE
+#define NVMET_MAX_CMD(ctrl) (NVME_CAP_MQES(ctrl->cap) + 1)
/*
* Nice round number that makes a list of nsids fit into a page.
@@ -699,7 +722,7 @@ void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
-int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
+u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
@@ -718,7 +741,7 @@ int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
u8 *buf, int buf_size);
#else
-static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
{
return 0;
}
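
NVMET_MAX_CMD is no longer a fixed constant but is derived from the controller's CAP.MQES field; MQES is a zero's-based value, hence the +1. A sketch of the computation, assuming CAP is available as a plain u64 (the macro mirrors the kernel's NVME_CAP_MQES):

#include <stdint.h>
#include <stdio.h>

#define NVME_CAP_MQES(cap)	((cap) & 0xffff)	/* CAP bits 15:0 */

int main(void)
{
	uint64_t cap = 0x3ff;			/* example: MQES = 1023 */
	uint16_t maxcmd = NVME_CAP_MQES(cap) + 1;

	printf("maxcmd=%u\n", maxcmd);		/* 1024 entries per queue */
	return 0;
}
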
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 9fe07d7efa96..24d0e2418d2e 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -132,7 +132,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
- id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));
/* don't support fuse commands */
id->fuses = 0;
@@ -226,13 +226,13 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
req->cmd->common.opcode == nvme_admin_identify) {
switch (req->cmd->identify.cns) {
case NVME_ID_CNS_CTRL:
- nvmet_passthru_override_id_ctrl(req);
+ status = nvmet_passthru_override_id_ctrl(req);
break;
case NVME_ID_CNS_NS:
- nvmet_passthru_override_id_ns(req);
+ status = nvmet_passthru_override_id_ns(req);
break;
case NVME_ID_CNS_NS_DESC_LIST:
- nvmet_passthru_override_id_descs(req);
+ status = nvmet_passthru_override_id_descs(req);
break;
}
} else if (status < 0)
@@ -306,7 +306,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
ns = nvme_find_get_ns(ctrl, nsid);
if (unlikely(!ns)) {
pr_err("failed to get passthru ns nsid:%u\n", nsid);
- status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
goto out;
}
@@ -426,7 +426,7 @@ u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
* emulated in the future if regular targets grow support for
* this feature.
*/
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
return nvmet_setup_passthru_command(req);
@@ -478,7 +478,7 @@ static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
case NVME_FEAT_RESV_PERSIST:
/* No reservations, see nvmet_parse_passthru_io_cmd() */
default:
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
}
@@ -546,7 +546,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
req->p.use_workqueue = true;
return NVME_SC_SUCCESS;
}
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
case NVME_ID_CNS_NS:
req->execute = nvmet_passthru_execute_cmd;
req->p.use_workqueue = true;
@@ -558,7 +558,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
req->p.use_workqueue = true;
return NVME_SC_SUCCESS;
}
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
default:
return nvmet_setup_passthru_command(req);
}
@@ -602,7 +602,7 @@ int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
goto out_put_file;
}
- old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
+ old = xa_cmpxchg(&passthru_subsystems, ctrl->instance, NULL,
subsys, GFP_KERNEL);
if (xa_is_err(old)) {
ret = xa_err(old);
@@ -635,7 +635,7 @@ out_unlock:
static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
if (subsys->passthru_ctrl) {
- xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
+ xa_erase(&passthru_subsystems, subsys->passthru_ctrl->instance);
module_put(subsys->passthru_ctrl->ops->module);
nvme_put_ctrl(subsys->passthru_ctrl);
}
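
The passthru hunks switch the xarray key from cntlid to instance: a cntlid is only unique within a subsystem, while the instance number is unique across the host, so two passthru controllers that happen to share a cntlid can no longer collide. The reserve-if-empty semantics of xa_cmpxchg(..., NULL, subsys, ...) can be modeled in userspace with a compare-exchange; the single-slot table below is an assumption for brevity:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic(void *) slot;	/* stands in for one xarray entry */

static int register_ctrl(void *subsys)
{
	void *expected = NULL;

	/* Succeeds only if no subsystem already claimed this instance. */
	if (!atomic_compare_exchange_strong(&slot, &expected, subsys))
		return -1;	/* -EBUSY in the kernel */
	return 0;
}

int main(void)
{
	int a = 1, b = 2;
	int r1 = register_ctrl(&a);
	int r2 = register_ctrl(&b);

	printf("%d %d\n", r1, r2);	/* 0 -1: second claim is rejected */
	return 0;
}
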
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 4597bca43a6d..1b6264fa5803 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -37,6 +37,8 @@
#define NVMET_RDMA_MAX_MDTS 8
#define NVMET_RDMA_MAX_METADATA_MDTS 5
+#define NVMET_RDMA_BACKLOG 128
+
struct nvmet_rdma_srq;
struct nvmet_rdma_cmd {
@@ -51,7 +53,6 @@ struct nvmet_rdma_cmd {
enum {
NVMET_RDMA_REQ_INLINE_DATA = (1 << 0),
- NVMET_RDMA_REQ_INVALIDATE_RKEY = (1 << 1),
};
struct nvmet_rdma_rsp {
@@ -473,12 +474,8 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
return 0;
out_free:
- while (--i >= 0) {
- struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
-
- list_del(&rsp->free_list);
- nvmet_rdma_free_rsp(ndev, rsp);
- }
+ while (--i >= 0)
+ nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
kfree(queue->rsps);
out:
return ret;
@@ -489,12 +486,8 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
struct nvmet_rdma_device *ndev = queue->dev;
int i, nr_rsps = queue->recv_queue_size * 2;
- for (i = 0; i < nr_rsps; i++) {
- struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
-
- list_del(&rsp->free_list);
- nvmet_rdma_free_rsp(ndev, rsp);
- }
+ for (i = 0; i < nr_rsps; i++)
+ nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
kfree(queue->rsps);
}
@@ -585,8 +578,8 @@ static void nvmet_rdma_set_sig_domain(struct blk_integrity *bi,
if (control & NVME_RW_PRINFO_PRCHK_REF)
domain->sig.dif.ref_remap = true;
- domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
- domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
+ domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.lbat);
+ domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.lbatm);
domain->sig.dif.app_escape = true;
if (pi_type == NVME_NS_DPS_PI_TYPE3)
domain->sig.dif.ref_escape = true;
@@ -720,7 +713,7 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
struct rdma_cm_id *cm_id = rsp->queue->cm_id;
struct ib_send_wr *first_wr;
- if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
+ if (rsp->invalidate_rkey) {
rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
} else {
@@ -859,12 +852,12 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
if (!nvme_is_write(rsp->req.cmd)) {
rsp->req.error_loc =
offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
if (off + len > rsp->queue->dev->inline_data_size) {
pr_err("invalid inline data offset!\n");
- return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
+ return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR;
}
/* no data command? */
@@ -903,10 +896,8 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
goto error_out;
rsp->n_rdma += ret;
- if (invalidate) {
+ if (invalidate)
rsp->invalidate_rkey = key;
- rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
- }
return 0;
@@ -928,7 +919,7 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
pr_err("invalid SGL subtype: %#x\n", sgl->type);
rsp->req.error_loc =
offsetof(struct nvme_common_command, dptr);
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
case NVME_KEY_SGL_FMT_DATA_DESC:
switch (sgl->type & 0xf) {
@@ -940,12 +931,12 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
pr_err("invalid SGL subtype: %#x\n", sgl->type);
rsp->req.error_loc =
offsetof(struct nvme_common_command, dptr);
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
default:
pr_err("invalid SGL type: %#x\n", sgl->type);
rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
- return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
+ return NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR;
}
}
@@ -1045,6 +1036,7 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
rsp->req.cmd = cmd->nvme_cmd;
rsp->req.port = queue->port;
rsp->n_rdma = 0;
+ rsp->invalidate_rkey = 0;
if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
unsigned long flags;
@@ -1583,8 +1575,19 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
}
if (queue->host_qid == 0) {
- /* Let inflight controller teardown complete */
- flush_workqueue(nvmet_wq);
+ struct nvmet_rdma_queue *q;
+ int pending = 0;
+
+ /* Check for pending controller teardown */
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ list_for_each_entry(q, &nvmet_rdma_queue_list, queue_list) {
+ if (q->nvme_sq.ctrl == queue->nvme_sq.ctrl &&
+ q->state == NVMET_RDMA_Q_DISCONNECTING)
+ pending++;
+ }
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+ if (pending > NVMET_RDMA_BACKLOG)
+ return NVME_SC_CONNECT_CTRL_BUSY;
}
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
@@ -1803,18 +1806,14 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
{
- struct nvmet_rdma_queue *queue;
+ struct nvmet_rdma_queue *queue, *n;
-restart:
mutex_lock(&nvmet_rdma_queue_mutex);
- list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
- if (queue->nvme_sq.ctrl == ctrl) {
- list_del_init(&queue->queue_list);
- mutex_unlock(&nvmet_rdma_queue_mutex);
-
- __nvmet_rdma_queue_disconnect(queue);
- goto restart;
- }
+ list_for_each_entry_safe(queue, n, &nvmet_rdma_queue_list, queue_list) {
+ if (queue->nvme_sq.ctrl != ctrl)
+ continue;
+ list_del_init(&queue->queue_list);
+ __nvmet_rdma_queue_disconnect(queue);
}
mutex_unlock(&nvmet_rdma_queue_mutex);
}
@@ -1880,7 +1879,7 @@ static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
goto out_destroy_id;
}
- ret = rdma_listen(cm_id, 128);
+ ret = rdma_listen(cm_id, NVMET_RDMA_BACKLOG);
if (ret) {
pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
goto out_destroy_id;
@@ -1943,6 +1942,14 @@ static int nvmet_rdma_add_port(struct nvmet_port *nport)
nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
}
+ if (nport->max_queue_size < 0) {
+ nport->max_queue_size = NVME_RDMA_DEFAULT_QUEUE_SIZE;
+ } else if (nport->max_queue_size > NVME_RDMA_MAX_QUEUE_SIZE) {
+ pr_warn("max_queue_size %u is too large, reducing to %u\n",
+ nport->max_queue_size, NVME_RDMA_MAX_QUEUE_SIZE);
+ nport->max_queue_size = NVME_RDMA_MAX_QUEUE_SIZE;
+ }
+
ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
nport->disc_addr.trsvcid, &port->addr);
if (ret) {
@@ -1993,6 +2000,17 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
}
}
+static ssize_t nvmet_rdma_host_port_addr(struct nvmet_ctrl *ctrl,
+ char *traddr, size_t traddr_len)
+{
+ struct nvmet_sq *nvme_sq = ctrl->sqs[0];
+ struct nvmet_rdma_queue *queue =
+ container_of(nvme_sq, struct nvmet_rdma_queue, nvme_sq);
+
+ return snprintf(traddr, traddr_len, "%pISc",
+ (struct sockaddr *)&queue->cm_id->route.addr.dst_addr);
+}
+
static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
{
if (ctrl->pi_support)
@@ -2002,6 +2020,8 @@ static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl)
{
+ if (ctrl->pi_support)
+ return NVME_RDMA_MAX_METADATA_QUEUE_SIZE;
return NVME_RDMA_MAX_QUEUE_SIZE;
}
@@ -2015,6 +2035,7 @@ static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.queue_response = nvmet_rdma_queue_response,
.delete_ctrl = nvmet_rdma_delete_ctrl,
.disc_traddr = nvmet_rdma_disc_port_addr,
+ .host_traddr = nvmet_rdma_host_port_addr,
.get_mdts = nvmet_rdma_get_mdts,
.get_max_queue_size = nvmet_rdma_get_max_queue_size,
};
@@ -2091,5 +2112,6 @@ static void __exit nvmet_rdma_exit(void)
module_init(nvmet_rdma_init);
module_exit(nvmet_rdma_exit);
+MODULE_DESCRIPTION("NVMe target RDMA transport driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 868aa4de2e4c..7c51c2a8c109 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -8,9 +8,14 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/key.h>
#include <linux/nvme-tcp.h>
+#include <linux/nvme-keyring.h>
#include <net/sock.h>
#include <net/tcp.h>
+#include <net/tls.h>
+#include <net/tls_prot.h>
+#include <net/handshake.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>
@@ -19,6 +24,8 @@
#include "nvmet.h"
#define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
+#define NVMET_TCP_MAXH2CDATA 0x400000 /* 4M arbitrary limit */
+#define NVMET_TCP_BACKLOG 128
static int param_store_val(const char *str, int *val, int min, int max)
{
@@ -66,6 +73,16 @@ device_param_cb(idle_poll_period_usecs, &set_param_ops,
MODULE_PARM_DESC(idle_poll_period_usecs,
"nvmet tcp io_work poll till idle time period in usecs: Default 0");
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+/*
+ * TLS handshake timeout
+ */
+static int tls_handshake_timeout = 10;
+module_param(tls_handshake_timeout, int, 0644);
+MODULE_PARM_DESC(tls_handshake_timeout,
+ "nvme TLS handshake timeout in seconds (default 10)");
+#endif
+
#define NVMET_TCP_RECV_BUDGET 8
#define NVMET_TCP_SEND_BUDGET 8
#define NVMET_TCP_IO_WORK_BUDGET 64
@@ -104,6 +121,7 @@ struct nvmet_tcp_cmd {
u32 pdu_len;
u32 pdu_recv;
int sg_idx;
+ char recv_cbuf[CMSG_LEN(sizeof(char))];
struct msghdr recv_msg;
struct bio_vec *iov;
u32 flags;
@@ -122,8 +140,10 @@ struct nvmet_tcp_cmd {
enum nvmet_tcp_queue_state {
NVMET_TCP_Q_CONNECTING,
+ NVMET_TCP_Q_TLS_HANDSHAKE,
NVMET_TCP_Q_LIVE,
NVMET_TCP_Q_DISCONNECTING,
+ NVMET_TCP_Q_FAILED,
};
struct nvmet_tcp_queue {
@@ -132,6 +152,7 @@ struct nvmet_tcp_queue {
struct work_struct io_work;
struct nvmet_cq nvme_cq;
struct nvmet_sq nvme_sq;
+ struct kref kref;
/* send state */
struct nvmet_tcp_cmd *cmds;
@@ -155,6 +176,10 @@ struct nvmet_tcp_queue {
struct ahash_request *snd_hash;
struct ahash_request *rcv_hash;
+ /* TLS state */
+ key_serial_t tls_pskid;
+ struct delayed_work tls_handshake_tmo_work;
+
unsigned long poll_end;
spinlock_t state_lock;
@@ -323,6 +348,7 @@ static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
return 0;
}
+/* If cmd buffers are NULL, no operation is performed */
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
{
kfree(cmd->iov);
@@ -348,7 +374,7 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
while (length) {
u32 iov_len = min_t(u32, length, sg->length - sg_offset);
- bvec_set_page(iov, sg_page(sg), sg->length,
+ bvec_set_page(iov, sg_page(sg), iov_len,
sg->offset + sg_offset);
length -= iov_len;
@@ -372,6 +398,7 @@ static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{
+ queue->rcv_state = NVMET_TCP_RECV_ERR;
if (status == -EPIPE || status == -ECONNRESET)
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
else
@@ -389,10 +416,10 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
NVME_SGL_FMT_OFFSET)) {
if (!nvme_is_write(cmd->req.cmd))
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
if (len > cmd->req.port->inline_data_size)
- return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
+ return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR;
cmd->pdu_len = len;
}
cmd->req.transfer_len += len;
@@ -872,6 +899,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
pr_err("bad nvme-tcp pdu length (%d)\n",
le32_to_cpu(icreq->hdr.plen));
nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
}
if (icreq->pfv != NVME_TCP_PFV_1_0) {
@@ -899,7 +927,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
icresp->hdr.pdo = 0;
icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
- icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */
+ icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
icresp->cpda = 0;
if (queue->hdr_digest)
icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
@@ -909,16 +937,14 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
iov.iov_base = icresp;
iov.iov_len = sizeof(*icresp);
ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
- if (ret < 0)
- goto free_crypto;
+ if (ret < 0) {
+ queue->state = NVMET_TCP_Q_FAILED;
+ return ret; /* queue removal will clean up */
+ }
queue->state = NVMET_TCP_Q_LIVE;
nvmet_prepare_receive_pdu(queue);
return 0;
-free_crypto:
- if (queue->hdr_digest || queue->data_digest)
- nvmet_tcp_free_crypto(queue);
- return ret;
}
static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
@@ -956,13 +982,13 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
struct nvme_tcp_data_pdu *data = &queue->pdu.data;
struct nvmet_tcp_cmd *cmd;
+ unsigned int exp_data_len;
if (likely(queue->nr_cmds)) {
if (unlikely(data->ttag >= queue->nr_cmds)) {
pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
queue->idx, data->ttag, queue->nr_cmds);
- nvmet_tcp_fatal_error(queue);
- return -EPROTO;
+ goto err_proto;
}
cmd = &queue->cmds[data->ttag];
} else {
@@ -973,19 +999,32 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
pr_err("ttag %u unexpected data offset %u (expected %u)\n",
data->ttag, le32_to_cpu(data->data_offset),
cmd->rbytes_done);
- /* FIXME: use path and transport errors */
- nvmet_req_complete(&cmd->req,
- NVME_SC_INVALID_FIELD | NVME_SC_DNR);
- return -EPROTO;
+ goto err_proto;
}
+ exp_data_len = le32_to_cpu(data->hdr.plen) -
+ nvmet_tcp_hdgst_len(queue) -
+ nvmet_tcp_ddgst_len(queue) -
+ sizeof(*data);
+
cmd->pdu_len = le32_to_cpu(data->data_length);
+ if (unlikely(cmd->pdu_len != exp_data_len ||
+ cmd->pdu_len == 0 ||
+ cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
+ pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
+ goto err_proto;
+ }
cmd->pdu_recv = 0;
nvmet_tcp_build_pdu_iovec(cmd);
queue->cmd = cmd;
queue->rcv_state = NVMET_TCP_RECV_DATA;
return 0;
+
+err_proto:
+ /* FIXME: use proper transport errors */
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
}
static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
@@ -1099,20 +1138,65 @@ static inline bool nvmet_tcp_pdu_valid(u8 type)
return false;
}
+static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue,
+ struct msghdr *msg, char *cbuf)
+{
+ struct cmsghdr *cmsg = (struct cmsghdr *)cbuf;
+ u8 ctype, level, description;
+ int ret = 0;
+
+ ctype = tls_get_record_type(queue->sock->sk, cmsg);
+ switch (ctype) {
+ case 0:
+ break;
+ case TLS_RECORD_TYPE_DATA:
+ break;
+ case TLS_RECORD_TYPE_ALERT:
+ tls_alert_recv(queue->sock->sk, msg, &level, &description);
+ if (level == TLS_ALERT_LEVEL_FATAL) {
+ pr_err("queue %d: TLS Alert desc %u\n",
+ queue->idx, description);
+ ret = -ENOTCONN;
+ } else {
+ pr_warn("queue %d: TLS Alert desc %u\n",
+ queue->idx, description);
+ ret = -EAGAIN;
+ }
+ break;
+ default:
+ /* discard this record type */
+ pr_err("queue %d: TLS record %d unhandled\n",
+ queue->idx, ctype);
+ ret = -EAGAIN;
+ break;
+ }
+ return ret;
+}
+
static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{
struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
- int len;
+ int len, ret;
struct kvec iov;
+ char cbuf[CMSG_LEN(sizeof(char))] = {};
struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
recv:
iov.iov_base = (void *)&queue->pdu + queue->offset;
iov.iov_len = queue->left;
+ if (queue->tls_pskid) {
+ msg.msg_control = cbuf;
+ msg.msg_controllen = sizeof(cbuf);
+ }
len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
iov.iov_len, msg.msg_flags);
if (unlikely(len < 0))
return len;
+ if (queue->tls_pskid) {
+ ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
+ if (ret < 0)
+ return ret;
+ }
queue->offset += len;
queue->left -= len;
@@ -1165,16 +1249,22 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
{
struct nvmet_tcp_cmd *cmd = queue->cmd;
- int ret;
+ int len, ret;
while (msg_data_left(&cmd->recv_msg)) {
- ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
+ len = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
cmd->recv_msg.msg_flags);
- if (ret <= 0)
- return ret;
+ if (len <= 0)
+ return len;
+ if (queue->tls_pskid) {
+ ret = nvmet_tcp_tls_record_ok(cmd->queue,
+ &cmd->recv_msg, cmd->recv_cbuf);
+ if (ret < 0)
+ return ret;
+ }
- cmd->pdu_recv += ret;
- cmd->rbytes_done += ret;
+ cmd->pdu_recv += len;
+ cmd->rbytes_done += len;
}
if (queue->data_digest) {
@@ -1192,20 +1282,30 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
{
struct nvmet_tcp_cmd *cmd = queue->cmd;
- int ret;
+ int ret, len;
+ char cbuf[CMSG_LEN(sizeof(char))] = {};
struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
struct kvec iov = {
.iov_base = (void *)&cmd->recv_ddgst + queue->offset,
.iov_len = queue->left
};
- ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
+ if (queue->tls_pskid) {
+ msg.msg_control = cbuf;
+ msg.msg_controllen = sizeof(cbuf);
+ }
+ len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
iov.iov_len, msg.msg_flags);
- if (unlikely(ret < 0))
- return ret;
+ if (unlikely(len < 0))
+ return len;
+ if (queue->tls_pskid) {
+ ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
+ if (ret < 0)
+ return ret;
+ }
- queue->offset += ret;
- queue->left -= ret;
+ queue->offset += len;
+ queue->left -= len;
if (queue->left)
return -EAGAIN;
@@ -1283,14 +1383,27 @@ done:
return ret;
}
+static void nvmet_tcp_release_queue(struct kref *kref)
+{
+ struct nvmet_tcp_queue *queue =
+ container_of(kref, struct nvmet_tcp_queue, kref);
+
+ WARN_ON(queue->state != NVMET_TCP_Q_DISCONNECTING);
+ queue_work(nvmet_wq, &queue->release_work);
+}
+
static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
{
- spin_lock(&queue->state_lock);
+ spin_lock_bh(&queue->state_lock);
+ if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
+ /* Socket closed during handshake */
+ tls_handshake_cancel(queue->sock->sk);
+ }
if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
queue->state = NVMET_TCP_Q_DISCONNECTING;
- queue_work(nvmet_wq, &queue->release_work);
+ kref_put(&queue->kref, nvmet_tcp_release_queue);
}
- spin_unlock(&queue->state_lock);
+ spin_unlock_bh(&queue->state_lock);
}
static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
@@ -1372,6 +1485,10 @@ static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
if (!c->r2t_pdu)
goto out_free_data;
+ if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
+ c->recv_msg.msg_control = c->recv_cbuf;
+ c->recv_msg.msg_controllen = sizeof(c->recv_cbuf);
+ }
c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
list_add_tail(&c->entry, &queue->free_list);
@@ -1465,18 +1582,13 @@ static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
struct nvmet_tcp_cmd *cmd = queue->cmds;
int i;
- for (i = 0; i < queue->nr_cmds; i++, cmd++) {
- if (nvmet_tcp_need_data_in(cmd))
- nvmet_tcp_free_cmd_buffers(cmd);
- }
-
- if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
- nvmet_tcp_free_cmd_buffers(&queue->connect);
+ for (i = 0; i < queue->nr_cmds; i++, cmd++)
+ nvmet_tcp_free_cmd_buffers(cmd);
+ nvmet_tcp_free_cmd_buffers(&queue->connect);
}
static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
- struct page *page;
struct nvmet_tcp_queue *queue =
container_of(w, struct nvmet_tcp_queue, release_work);
@@ -1485,6 +1597,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
mutex_unlock(&nvmet_tcp_queue_mutex);
nvmet_tcp_restore_socket_callbacks(queue);
+ cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
cancel_work_sync(&queue->io_work);
/* stop accepting incoming data */
queue->rcv_state = NVMET_TCP_RECV_ERR;
@@ -1493,14 +1606,13 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
nvmet_sq_destroy(&queue->nvme_sq);
cancel_work_sync(&queue->io_work);
nvmet_tcp_free_cmd_data_in_buffers(queue);
- sock_release(queue->sock);
+ /* ->sock will be released by fput() */
+ fput(queue->sock->file);
nvmet_tcp_free_cmds(queue);
if (queue->hdr_digest || queue->data_digest)
nvmet_tcp_free_crypto(queue);
ida_free(&nvmet_tcp_queue_ida, queue->idx);
-
- page = virt_to_head_page(queue->pf_cache.va);
- __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
+ page_frag_cache_drain(&queue->pf_cache);
kfree(queue);
}
@@ -1512,8 +1624,13 @@ static void nvmet_tcp_data_ready(struct sock *sk)
read_lock_bh(&sk->sk_callback_lock);
queue = sk->sk_user_data;
- if (likely(queue))
- queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
+ if (likely(queue)) {
+ if (queue->data_ready)
+ queue->data_ready(sk);
+ if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq,
+ &queue->io_work);
+ }
read_unlock_bh(&sk->sk_callback_lock);
}
@@ -1621,31 +1738,176 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
return ret;
}
-static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue)
+{
+ struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
+ int len, ret;
+ struct kvec iov = {
+ .iov_base = (u8 *)&queue->pdu + queue->offset,
+ .iov_len = sizeof(struct nvme_tcp_hdr),
+ };
+ char cbuf[CMSG_LEN(sizeof(char))] = {};
+ struct msghdr msg = {
+ .msg_control = cbuf,
+ .msg_controllen = sizeof(cbuf),
+ .msg_flags = MSG_PEEK,
+ };
+
+ if (nvmet_port_secure_channel_required(queue->port->nport))
+ return 0;
+
+ len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
+ iov.iov_len, msg.msg_flags);
+ if (unlikely(len < 0)) {
+ pr_debug("queue %d: peek error %d\n",
+ queue->idx, len);
+ return len;
+ }
+
+ ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
+ if (ret < 0)
+ return ret;
+
+ if (len < sizeof(struct nvme_tcp_hdr)) {
+ pr_debug("queue %d: short read, %d bytes missing\n",
+ queue->idx, (int)iov.iov_len - len);
+ return -EAGAIN;
+ }
+ pr_debug("queue %d: hdr type %d hlen %d plen %d size %d\n",
+ queue->idx, hdr->type, hdr->hlen, hdr->plen,
+ (int)sizeof(struct nvme_tcp_icreq_pdu));
+ if (hdr->type == nvme_tcp_icreq &&
+ hdr->hlen == sizeof(struct nvme_tcp_icreq_pdu) &&
+ hdr->plen == cpu_to_le32(sizeof(struct nvme_tcp_icreq_pdu))) {
+ pr_debug("queue %d: icreq detected\n",
+ queue->idx);
+ return len;
+ }
+ return 0;
+}
+
+static void nvmet_tcp_tls_handshake_done(void *data, int status,
+ key_serial_t peerid)
+{
+ struct nvmet_tcp_queue *queue = data;
+
+ pr_debug("queue %d: TLS handshake done, key %x, status %d\n",
+ queue->idx, peerid, status);
+ spin_lock_bh(&queue->state_lock);
+ if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) {
+ spin_unlock_bh(&queue->state_lock);
+ return;
+ }
+ if (!status) {
+ queue->tls_pskid = peerid;
+ queue->state = NVMET_TCP_Q_CONNECTING;
+ } else
+ queue->state = NVMET_TCP_Q_FAILED;
+ spin_unlock_bh(&queue->state_lock);
+
+ cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
+ if (status)
+ nvmet_tcp_schedule_release_queue(queue);
+ else
+ nvmet_tcp_set_queue_sock(queue);
+ kref_put(&queue->kref, nvmet_tcp_release_queue);
+}
+
+static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w)
+{
+ struct nvmet_tcp_queue *queue = container_of(to_delayed_work(w),
+ struct nvmet_tcp_queue, tls_handshake_tmo_work);
+
+ pr_warn("queue %d: TLS handshake timeout\n", queue->idx);
+ /*
+ * If tls_handshake_cancel() fails we've lost the race with
+ * nvmet_tcp_tls_handshake_done().
+ */
+ if (!tls_handshake_cancel(queue->sock->sk))
+ return;
+ spin_lock_bh(&queue->state_lock);
+ if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) {
+ spin_unlock_bh(&queue->state_lock);
+ return;
+ }
+ queue->state = NVMET_TCP_Q_FAILED;
+ spin_unlock_bh(&queue->state_lock);
+ nvmet_tcp_schedule_release_queue(queue);
+ kref_put(&queue->kref, nvmet_tcp_release_queue);
+}
+
+static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue)
+{
+ int ret = -EOPNOTSUPP;
+ struct tls_handshake_args args;
+
+ if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) {
+ pr_warn("cannot start TLS in state %d\n", queue->state);
+ return -EINVAL;
+ }
+
+ kref_get(&queue->kref);
+ pr_debug("queue %d: TLS ServerHello\n", queue->idx);
+ memset(&args, 0, sizeof(args));
+ args.ta_sock = queue->sock;
+ args.ta_done = nvmet_tcp_tls_handshake_done;
+ args.ta_data = queue;
+ args.ta_keyring = key_serial(queue->port->nport->keyring);
+ args.ta_timeout_ms = tls_handshake_timeout * 1000;
+
+ ret = tls_server_hello_psk(&args, GFP_KERNEL);
+ if (ret) {
+ kref_put(&queue->kref, nvmet_tcp_release_queue);
+ pr_err("failed to start TLS, err=%d\n", ret);
+ } else {
+ queue_delayed_work(nvmet_wq, &queue->tls_handshake_tmo_work,
+ tls_handshake_timeout * HZ);
+ }
+ return ret;
+}
+#else
+static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w) {}
+#endif
+
+static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
struct socket *newsock)
{
struct nvmet_tcp_queue *queue;
+ struct file *sock_file = NULL;
int ret;
queue = kzalloc(sizeof(*queue), GFP_KERNEL);
- if (!queue)
- return -ENOMEM;
+ if (!queue) {
+ ret = -ENOMEM;
+ goto out_release;
+ }
INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
+ kref_init(&queue->kref);
queue->sock = newsock;
queue->port = port;
queue->nr_cmds = 0;
spin_lock_init(&queue->state_lock);
- queue->state = NVMET_TCP_Q_CONNECTING;
+ if (queue->port->nport->disc_addr.tsas.tcp.sectype ==
+ NVMF_TCP_SECTYPE_TLS13)
+ queue->state = NVMET_TCP_Q_TLS_HANDSHAKE;
+ else
+ queue->state = NVMET_TCP_Q_CONNECTING;
INIT_LIST_HEAD(&queue->free_list);
init_llist_head(&queue->resp_list);
INIT_LIST_HEAD(&queue->resp_send_list);
+ sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL);
+ if (IS_ERR(sock_file)) {
+ ret = PTR_ERR(sock_file);
+ goto out_free_queue;
+ }
+
queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
if (queue->idx < 0) {
ret = queue->idx;
- goto out_free_queue;
+ goto out_sock;
}
ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
@@ -1662,11 +1924,33 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
mutex_unlock(&nvmet_tcp_queue_mutex);
+ INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work,
+ nvmet_tcp_tls_handshake_timeout);
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+ if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
+ struct sock *sk = queue->sock->sk;
+
+ /* Restore the default callbacks before starting upcall */
+ read_lock_bh(&sk->sk_callback_lock);
+ sk->sk_user_data = NULL;
+ sk->sk_data_ready = port->data_ready;
+ read_unlock_bh(&sk->sk_callback_lock);
+ if (!nvmet_tcp_try_peek_pdu(queue)) {
+ if (!nvmet_tcp_tls_handshake(queue))
+ return;
+ /* TLS handshake failed, terminate the connection */
+ goto out_destroy_sq;
+ }
+ /* Not a TLS connection, continue with normal processing */
+ queue->state = NVMET_TCP_Q_CONNECTING;
+ }
+#endif
+
ret = nvmet_tcp_set_queue_sock(queue);
if (ret)
goto out_destroy_sq;
- return 0;
+ return;
out_destroy_sq:
mutex_lock(&nvmet_tcp_queue_mutex);
list_del_init(&queue->queue_list);
@@ -1676,9 +1960,14 @@ out_free_connect:
nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove:
ida_free(&nvmet_tcp_queue_ida, queue->idx);
+out_sock:
+ fput(queue->sock->file);
out_free_queue:
kfree(queue);
- return ret;
+out_release:
+ pr_err("failed to allocate queue, error %d\n", ret);
+ if (!sock_file)
+ sock_release(newsock);
}
static void nvmet_tcp_accept_work(struct work_struct *w)
@@ -1695,11 +1984,7 @@ static void nvmet_tcp_accept_work(struct work_struct *w)
pr_warn("failed to accept err=%d\n", ret);
return;
}
- ret = nvmet_tcp_alloc_queue(port, newsock);
- if (ret) {
- pr_err("failed to allocate queue\n");
- sock_release(newsock);
- }
+ nvmet_tcp_alloc_queue(port, newsock);
}
}
@@ -1779,7 +2064,7 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
goto err_sock;
}
- ret = kernel_listen(port->sock, 128);
+ ret = kernel_listen(port->sock, NVMET_TCP_BACKLOG);
if (ret) {
pr_err("failed to listen %d on port sock\n", ret);
goto err_sock;
@@ -1845,13 +2130,26 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
container_of(sq, struct nvmet_tcp_queue, nvme_sq);
if (sq->qid == 0) {
- /* Let inflight controller teardown complete */
- flush_workqueue(nvmet_wq);
+ struct nvmet_tcp_queue *q;
+ int pending = 0;
+
+ /* Check for pending controller teardown */
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_for_each_entry(q, &nvmet_tcp_queue_list, queue_list) {
+ if (q->nvme_sq.ctrl == sq->ctrl &&
+ q->state == NVMET_TCP_Q_DISCONNECTING)
+ pending++;
+ }
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+ if (pending > NVMET_TCP_BACKLOG)
+ return NVME_SC_CONNECT_CTRL_BUSY;
}
queue->nr_cmds = sq->size * 2;
- if (nvmet_tcp_alloc_cmds(queue))
+ if (nvmet_tcp_alloc_cmds(queue)) {
+ queue->nr_cmds = 0;
return NVME_SC_INTERNAL;
+ }
return 0;
}
@@ -1871,6 +2169,19 @@ static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
}
}
+static ssize_t nvmet_tcp_host_port_addr(struct nvmet_ctrl *ctrl,
+ char *traddr, size_t traddr_len)
+{
+ struct nvmet_sq *sq = ctrl->sqs[0];
+ struct nvmet_tcp_queue *queue =
+ container_of(sq, struct nvmet_tcp_queue, nvme_sq);
+
+ if (queue->sockaddr_peer.ss_family == AF_UNSPEC)
+ return -EINVAL;
+ return snprintf(traddr, traddr_len, "%pISc",
+ (struct sockaddr *)&queue->sockaddr_peer);
+}
+
static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_TCP,
@@ -1881,6 +2192,7 @@ static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
.delete_ctrl = nvmet_tcp_delete_ctrl,
.install_queue = nvmet_tcp_install_queue,
.disc_traddr = nvmet_tcp_disc_port_addr,
+ .host_traddr = nvmet_tcp_host_port_addr,
};
static int __init nvmet_tcp_init(void)
@@ -1916,10 +2228,12 @@ static void __exit nvmet_tcp_exit(void)
flush_workqueue(nvmet_wq);
destroy_workqueue(nvmet_tcp_wq);
+ ida_destroy(&nvmet_tcp_queue_ida);
}
module_init(nvmet_tcp_init);
module_exit(nvmet_tcp_exit);
+MODULE_DESCRIPTION("NVMe target TCP transport driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */
diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c
index bff454d46255..8d1806a82887 100644
--- a/drivers/nvme/target/trace.c
+++ b/drivers/nvme/target/trace.c
@@ -119,6 +119,67 @@ const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p,
}
}
+static const char *nvmet_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
+{
+ static const char * const zsa_strs[] = {
+ [0x01] = "close zone",
+ [0x02] = "finish zone",
+ [0x03] = "open zone",
+ [0x04] = "reset zone",
+ [0x05] = "offline zone",
+ [0x10] = "set zone descriptor extension"
+ };
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ const char *zsa_str;
+ u8 zsa = cdw10[12];
+ u8 all = cdw10[13];
+
+ if (zsa < ARRAY_SIZE(zsa_strs) && zsa_strs[zsa])
+ zsa_str = zsa_strs[zsa];
+ else
+ zsa_str = "reserved";
+
+ trace_seq_printf(p, "slba=%llu, zsa=%u:%s, all=%u",
+ slba, zsa, zsa_str, all);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvmet_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
+{
+ static const char * const zrasf_strs[] = {
+ [0x00] = "list all zones",
+ [0x01] = "list the zones in the ZSE: Empty state",
+ [0x02] = "list the zones in the ZSIO: Implicitly Opened state",
+ [0x03] = "list the zones in the ZSEO: Explicitly Opened state",
+ [0x04] = "list the zones in the ZSC: Closed state",
+ [0x05] = "list the zones in the ZSF: Full state",
+ [0x06] = "list the zones in the ZSRO: Read Only state",
+ [0x07] = "list the zones in the ZSO: Offline state",
+ [0x09] = "list the zones that have the zone attribute"
+ };
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u32 numd = get_unaligned_le32(&cdw10[8]);
+ u8 zra = cdw10[12];
+ u8 zrasf = cdw10[13];
+ const char *zrasf_str;
+ u8 pr = cdw10[14];
+
+ if (zrasf < ARRAY_SIZE(zrasf_strs) && zrasf_strs[zrasf])
+ zrasf_str = zrasf_strs[zrasf];
+ else
+ zrasf_str = "reserved";
+
+ trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u:%s, pr=%u",
+ slba, numd, zra, zrasf, zrasf_str, pr);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
u8 opcode, u8 *cdw10)
{
@@ -126,9 +187,14 @@ const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
case nvme_cmd_read:
case nvme_cmd_write:
case nvme_cmd_write_zeroes:
+ case nvme_cmd_zone_append:
return nvmet_trace_read_write(p, cdw10);
case nvme_cmd_dsm:
return nvmet_trace_dsm(p, cdw10);
+ case nvme_cmd_zone_mgmt_send:
+ return nvmet_trace_zone_mgmt_send(p, cdw10);
+ case nvme_cmd_zone_mgmt_recv:
+ return nvmet_trace_zone_mgmt_recv(p, cdw10);
default:
return nvmet_trace_common(p, cdw10);
}
@@ -176,6 +242,34 @@ static const char *nvmet_trace_fabrics_property_get(struct trace_seq *p,
return ret;
}
+static const char *nvmet_trace_fabrics_auth_send(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 tl = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, tl=%u",
+ spsp0, spsp1, secp, tl);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvmet_trace_fabrics_auth_receive(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 al = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, al=%u",
+ spsp0, spsp1, secp, al);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
static const char *nvmet_trace_fabrics_common(struct trace_seq *p, u8 *spc)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -195,6 +289,10 @@ const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p,
return nvmet_trace_fabrics_connect(p, spc);
case nvme_fabrics_type_property_get:
return nvmet_trace_fabrics_property_get(p, spc);
+ case nvme_fabrics_type_auth_send:
+ return nvmet_trace_fabrics_auth_send(p, spc);
+ case nvme_fabrics_type_auth_receive:
+ return nvmet_trace_fabrics_auth_receive(p, spc);
default:
return nvmet_trace_fabrics_common(p, spc);
}
@@ -211,7 +309,7 @@ const char *nvmet_trace_disk_name(struct trace_seq *p, char *name)
return ret;
}
-const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
+const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -224,8 +322,8 @@ const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
* If we can know the extra data of the connect command in this stage,
* we can update this print statement later.
*/
- if (ctrl)
- trace_seq_printf(p, "%d", ctrl->cntlid);
+ if (ctrl_id)
+ trace_seq_printf(p, "%d", ctrl_id);
else
trace_seq_printf(p, "_");
trace_seq_putc(p, 0);
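
The new zone-management decoders above pull their fields straight out of the raw 16-byte CDW10 area with unaligned little-endian loads. A sketch of the same extraction, modeling get_unaligned_le64() with memcpy (a little-endian host is assumed) and using an invented Zone Management Send command:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t cdw10[16] = { 0 };
	uint64_t slba;

	cdw10[0] = 0x80;	/* slba = 128 */
	cdw10[12] = 0x04;	/* zsa: reset zone */
	cdw10[13] = 0x01;	/* all: apply to all zones */

	memcpy(&slba, cdw10, sizeof(slba));	/* get_unaligned_le64() analogue */
	printf("slba=%llu, zsa=%u, all=%u\n",
	       (unsigned long long)slba, (unsigned)cdw10[12], (unsigned)cdw10[13]);
	return 0;
}
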
diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
index 6109b3806b12..7f7ebf9558e5 100644
--- a/drivers/nvme/target/trace.h
+++ b/drivers/nvme/target/trace.h
@@ -32,18 +32,24 @@ const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p, u8 fctype,
nvmet_trace_parse_nvm_cmd(p, opcode, cdw10) : \
nvmet_trace_parse_admin_cmd(p, opcode, cdw10)))
-const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl);
-#define __print_ctrl_name(ctrl) \
- nvmet_trace_ctrl_name(p, ctrl)
+const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id);
+#define __print_ctrl_id(ctrl_id) \
+ nvmet_trace_ctrl_id(p, ctrl_id)
const char *nvmet_trace_disk_name(struct trace_seq *p, char *name);
#define __print_disk_name(name) \
nvmet_trace_disk_name(p, name)
#ifndef TRACE_HEADER_MULTI_READ
-static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
+static inline u16 nvmet_req_to_ctrl_id(struct nvmet_req *req)
{
- return req->sq->ctrl;
+ /*
+ * The queue and controller pointers are not valid until an association
+ * has been established.
+ */
+ if (!req->sq || !req->sq->ctrl)
+ return 0;
+ return req->sq->ctrl->cntlid;
}
static inline void __assign_req_name(char *name, struct nvmet_req *req)
@@ -53,8 +59,7 @@ static inline void __assign_req_name(char *name, struct nvmet_req *req)
return;
}
- strncpy(name, req->ns->device_path,
- min_t(size_t, DISK_NAME_LEN, strlen(req->ns->device_path)));
+ strscpy_pad(name, req->ns->device_path, DISK_NAME_LEN);
}
#endif
@@ -63,7 +68,7 @@ TRACE_EVENT(nvmet_req_init,
TP_ARGS(req, cmd),
TP_STRUCT__entry(
__field(struct nvme_command *, cmd)
- __field(struct nvmet_ctrl *, ctrl)
+ __field(u16, ctrl_id)
__array(char, disk, DISK_NAME_LEN)
__field(int, qid)
__field(u16, cid)
@@ -76,7 +81,7 @@ TRACE_EVENT(nvmet_req_init,
),
TP_fast_assign(
__entry->cmd = cmd;
- __entry->ctrl = nvmet_req_to_ctrl(req);
+ __entry->ctrl_id = nvmet_req_to_ctrl_id(req);
__assign_req_name(__entry->disk, req);
__entry->qid = req->sq->qid;
__entry->cid = cmd->common.command_id;
@@ -85,12 +90,12 @@ TRACE_EVENT(nvmet_req_init,
__entry->flags = cmd->common.flags;
__entry->nsid = le32_to_cpu(cmd->common.nsid);
__entry->metadata = le64_to_cpu(cmd->common.metadata);
- memcpy(__entry->cdw10, &cmd->common.cdw10,
+ memcpy(__entry->cdw10, &cmd->common.cdws,
sizeof(__entry->cdw10));
),
TP_printk("nvmet%s: %sqid=%d, cmdid=%u, nsid=%u, flags=%#x, "
"meta=%#llx, cmd=(%s, %s)",
- __print_ctrl_name(__entry->ctrl),
+ __print_ctrl_id(__entry->ctrl_id),
__print_disk_name(__entry->disk),
__entry->qid, __entry->cid, __entry->nsid,
__entry->flags, __entry->metadata,
@@ -104,7 +109,7 @@ TRACE_EVENT(nvmet_req_complete,
TP_PROTO(struct nvmet_req *req),
TP_ARGS(req),
TP_STRUCT__entry(
- __field(struct nvmet_ctrl *, ctrl)
+ __field(u16, ctrl_id)
__array(char, disk, DISK_NAME_LEN)
__field(int, qid)
__field(int, cid)
@@ -112,7 +117,7 @@ TRACE_EVENT(nvmet_req_complete,
__field(u16, status)
),
TP_fast_assign(
- __entry->ctrl = nvmet_req_to_ctrl(req);
+ __entry->ctrl_id = nvmet_req_to_ctrl_id(req);
__entry->qid = req->cq->qid;
__entry->cid = req->cqe->command_id;
__entry->result = le64_to_cpu(req->cqe->result.u64);
@@ -120,7 +125,7 @@ TRACE_EVENT(nvmet_req_complete,
__assign_req_name(__entry->disk, req);
),
TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x",
- __print_ctrl_name(__entry->ctrl),
+ __print_ctrl_id(__entry->ctrl_id),
__print_disk_name(__entry->disk),
__entry->qid, __entry->cid, __entry->result, __entry->status)
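
The trace.h hunks complete the switch from storing a struct nvmet_ctrl pointer in the trace entry to storing the 16-bit cntlid. The entry is formatted long after the event fires, when the controller may already be freed; capturing the scalar at assign time (and 0 when no association exists yet) makes the record self-contained. A userspace model with invented types:

#include <stdint.h>
#include <stdio.h>

struct trace_entry { uint16_t ctrl_id; };	/* was: struct nvmet_ctrl * */

struct ctrl { uint16_t cntlid; };

static void assign(struct trace_entry *e, struct ctrl *c)
{
	e->ctrl_id = c ? c->cntlid : 0;	/* 0 is printed as "_" pre-connect */
}

int main(void)
{
	struct ctrl c = { .cntlid = 7 };
	struct trace_entry e;

	assign(&e, &c);
	/* c may now go away; the record still prints correctly. */
	printf("nvmet%u\n", (unsigned)e.ctrl_id);
	return 0;
}
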
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 5b5c1e481722..af9e13be7678 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -52,14 +52,10 @@ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
return false;
/*
- * ZNS does not define a conventional zone type. If the underlying
- * device has a bitmap set indicating the existence of conventional
- * zones, reject the device. Otherwise, use report zones to detect if
- * the device has conventional zones.
+ * ZNS does not define a conventional zone type. Use report zones
+ * to detect if the device has conventional zones and reject it if
+ * it does.
*/
- if (ns->bdev->bd_disk->conv_zones_bitmap)
- return false;
-
ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev),
validate_conv_zones_cb, NULL);
if (ret < 0)
@@ -104,7 +100,7 @@ void nvmet_execute_identify_ns_zns(struct nvmet_req *req)
if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
req->error_loc = offsetof(struct nvme_identify, nsid);
- status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
goto out;
}
@@ -125,7 +121,7 @@ void nvmet_execute_identify_ns_zns(struct nvmet_req *req)
}
if (!bdev_is_zoned(req->ns->bdev)) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc = offsetof(struct nvme_identify, nsid);
goto out;
}
@@ -162,17 +158,17 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
- return NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ return NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
}
if (out_bufsize < sizeof(struct nvme_zone_report)) {
req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
switch (req->cmd->zmr.pr) {
@@ -181,7 +177,7 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
break;
default:
req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
switch (req->cmd->zmr.zrasf) {
@@ -197,7 +193,7 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
default:
req->error_loc =
offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf);
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
return NVME_SC_SUCCESS;
@@ -345,7 +341,7 @@ static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret)
return NVME_SC_SUCCESS;
case -EINVAL:
case -EIO:
- return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
+ return NVME_SC_ZONE_INVALID_TRANSITION | NVME_STATUS_DNR;
default:
return NVME_SC_INTERNAL;
}
@@ -456,8 +452,7 @@ static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
switch (zsa_req_op(req->cmd->zms.zsa)) {
case REQ_OP_ZONE_RESET:
ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
- get_capacity(req->ns->bdev->bd_disk),
- GFP_KERNEL);
+ get_capacity(req->ns->bdev->bd_disk));
if (ret < 0)
return blkdev_zone_mgmt_errno_to_nvme_status(ret);
break;
@@ -468,7 +463,7 @@ static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
default:
/* this is needed to quiet compiler warning */
req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
- return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
return NVME_SC_SUCCESS;
@@ -486,7 +481,7 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
if (op == REQ_OP_LAST) {
req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
- status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
+ status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_STATUS_DNR;
goto out;
}
@@ -498,17 +493,17 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
if (sect >= get_capacity(bdev->bd_disk)) {
req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
- status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
goto out;
}
if (sect & (zone_sectors - 1)) {
req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto out;
}
- ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL);
+ ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors);
if (ret < 0)
status = blkdev_zone_mgmt_errno_to_nvme_status(ret);
@@ -556,13 +551,13 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
req->error_loc = offsetof(struct nvme_rw_command, slba);
- status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
goto out;
}
if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
req->error_loc = offsetof(struct nvme_rw_command, slba);
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
goto out;
}
@@ -595,7 +590,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
}
if (total_len != nvmet_rw_data_len(req)) {
- status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
goto out_put_bio;
}