From 1ad11eafc63ac16e667853bee4273879226d2d1b Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 7 Mar 2023 14:32:43 -0600 Subject: nvme-pci: drop redundant pci_enable_pcie_error_reporting() pci_enable_pcie_error_reporting() enables the device to send ERR_* Messages. Since f26e58bf6f54 ("PCI/AER: Enable error reporting when AER is native"), the PCI core does this for all devices during enumeration, so the driver doesn't need to do it itself. Remove the redundant pci_enable_pcie_error_reporting() call from the driver. Also remove the corresponding pci_disable_pcie_error_reporting() from the driver .remove() path. Note that this only controls ERR_* Messages from the device. An ERR_* Message may cause the Root Port to generate an interrupt, depending on the AER Root Error Command register managed by the AER service driver. Signed-off-by: Bjorn Helgaas Reviewed-by: Chaitanya Kulkarni Signed-off-by: Christoph Hellwig --- drivers/nvme/host/pci.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'drivers/nvme') diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 5b95c94ee40f..593f86323e25 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -5,7 +5,6 @@ */ #include -#include #include #include #include @@ -2535,7 +2534,6 @@ static int nvme_pci_enable(struct nvme_dev *dev) nvme_map_cmb(dev); - pci_enable_pcie_error_reporting(pdev); pci_save_state(pdev); result = nvme_pci_configure_admin_queue(dev); @@ -2600,10 +2598,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) nvme_suspend_io_queues(dev); nvme_suspend_queue(dev, 0); pci_free_irq_vectors(pdev); - if (pci_is_enabled(pdev)) { - pci_disable_pcie_error_reporting(pdev); + if (pci_is_enabled(pdev)) pci_disable_device(pdev); - } nvme_reap_pending_cqes(dev); nvme_cancel_tagset(&dev->ctrl); -- cgit v1.2.3 From ab76e7206b672b2e8818cb121a04506956d6b223 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Tue, 14 Mar 2023 15:20:36 +0900 Subject: nvmet: fix error handling in nvmet_execute_identify_cns_cs_ns() The NVMe specification states that: If the I/O Command Set associated with the namespace identified by the NSID field does not support the Identify Namespace data structure specified by the CSI field, the controller shall abort the command with a status code of Invalid Field in Command. In other words, if nvmet_execute_identify_cns_cs_ns() is called for a target with a block device that is not zoned, we should not return any data and should set the status to NVME_SC_INVALID_FIELD. While at it, it is also better to revalidate the ns block device *before* checking if the block device is zoned, to ensure that nvmet_execute_identify_cns_cs_ns() operates against updated device characteristics.
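In outline, a non-zoned namespace must now be rejected rather than answered with (zeroed) data. A minimal user-space sketch of that decision, assuming the kernel's status values NVME_SC_INVALID_FIELD (0x2) and NVME_SC_DNR (0x4000) and using a plain flag in place of bdev_is_zoned():

#include <stdio.h>
#include <stdint.h>

#define NVME_SC_SUCCESS        0x0
#define NVME_SC_INVALID_FIELD  0x2     /* Invalid Field in Command */
#define NVME_SC_DNR            0x4000  /* Do Not Retry */

/* Model of the check: a CSI=ZNS Identify Namespace against a non-zoned
 * namespace must be aborted instead of returning any data. */
static uint16_t zns_identify_ns_status(int bdev_is_zoned)
{
    if (!bdev_is_zoned)
        return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
    return NVME_SC_SUCCESS;
}

int main(void)
{
    printf("non-zoned namespace -> status 0x%04x\n", zns_identify_ns_status(0));
    printf("zoned namespace     -> status 0x%04x\n", zns_identify_ns_status(1));
    return 0;
}
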
Fixes: aaf2e048af27 ("nvmet: add ZBD over ZNS backend support") Signed-off-by: Damien Le Moal Reviewed-by: Keith Busch Reviewed-by: Sagi Grimberg Reviewed-by: Chaitanya Kulkarni Signed-off-by: Christoph Hellwig --- drivers/nvme/target/zns.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'drivers/nvme') diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c index 7e4292d88016..77ef0a86ff61 100644 --- a/drivers/nvme/target/zns.c +++ b/drivers/nvme/target/zns.c @@ -97,7 +97,7 @@ out: void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req) { - struct nvme_id_ns_zns *id_zns; + struct nvme_id_ns_zns *id_zns = NULL; u64 zsze; u16 status; u32 mar, mor; @@ -118,16 +118,18 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req) if (status) goto done; - if (!bdev_is_zoned(req->ns->bdev)) { - req->error_loc = offsetof(struct nvme_identify, nsid); - goto done; - } - if (nvmet_ns_revalidate(req->ns)) { mutex_lock(&req->ns->subsys->lock); nvmet_ns_changed(req->ns->subsys, req->ns->nsid); mutex_unlock(&req->ns->subsys->lock); } + + if (!bdev_is_zoned(req->ns->bdev)) { + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + req->error_loc = offsetof(struct nvme_identify, nsid); + goto out; + } + zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >> req->ns->blksize_shift; id_zns->lbafe[0].zsze = cpu_to_le64(zsze); @@ -148,8 +150,8 @@ done: status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns)); - kfree(id_zns); out: + kfree(id_zns); nvmet_req_complete(req, status); } -- cgit v1.2.3 From 8c098aa00118c35108f0c19bd3cdc45e11574948 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Wed, 15 Mar 2023 19:59:35 +0900 Subject: nvmet: fix Identify Namespace handling The identify command with cns set to NVME_ID_CNS_NS does not directly depend on the command set. The NVMe specification is rather confusing here as it appears that this command only applies to the NVM command set. However, footnote 8 of Figure 273 in the NVMe 2.0 base specification clearly states that this command applies to I/O command sets that support logical blocks, that is, NVM and ZNS. Both the NVM and ZNS command set specifications also list this identify as mandatory. The command handling should thus not look at the csi field since it is defined as unused for this command. Given that we do not support the KV command set, simply remove the csi switch-case for that command handling and directly call nvmet_execute_identify_ns() in nvmet_execute_identify().
Fixes: ab5d0b38c047 ("nvmet: add Command Set Identifier support") Signed-off-by: Damien Le Moal Reviewed-by: Chaitanya Kulkarni Tested-by: Chaitanya Kulkarni Signed-off-by: Christoph Hellwig --- drivers/nvme/target/admin-cmd.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'drivers/nvme') diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 80099df37314..a982f925dfce 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -692,13 +692,8 @@ static void nvmet_execute_identify(struct nvmet_req *req) switch (req->cmd->identify.cns) { case NVME_ID_CNS_NS: - switch (req->cmd->identify.csi) { - case NVME_CSI_NVM: - return nvmet_execute_identify_ns(req); - default: - break; - } - break; + nvmet_execute_identify_ns(req); + return; case NVME_ID_CNS_CS_NS: if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { switch (req->cmd->identify.csi) { -- cgit v1.2.3 From 62904b3b333e7f3c0f879dc3513295eee5765c9f Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Wed, 15 Mar 2023 19:59:36 +0900 Subject: nvmet: fix Identify Controller handling The identify command with cns set to NVME_ID_CNS_CTRL does not depend on the command set. The execution of this command should thus not look at the csi specified in the command. Simplify nvmet_execute_identify() to directly call nvmet_execute_identify_ctrl() without the csi switch-case. Fixes: ab5d0b38c047 ("nvmet: add Command Set Identifier support") Signed-off-by: Damien Le Moal Reviewed-by: Chaitanya Kulkarni Tested-by: Chaitanya Kulkarni Signed-off-by: Christoph Hellwig --- drivers/nvme/target/admin-cmd.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers/nvme') diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index a982f925dfce..a747c69074da 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -705,11 +705,8 @@ static void nvmet_execute_identify(struct nvmet_req *req) } break; case NVME_ID_CNS_CTRL: - switch (req->cmd->identify.csi) { - case NVME_CSI_NVM: - return nvmet_execute_identify_ctrl(req); - } - break; + nvmet_execute_identify_ctrl(req); + return; case NVME_ID_CNS_CS_CTRL: if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { switch (req->cmd->identify.csi) { -- cgit v1.2.3 From 97416f67d55fb8b866ff1815ca7ef26b6dfa6a5e Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Wed, 15 Mar 2023 19:59:37 +0900 Subject: nvmet: fix Identify Active Namespace ID list handling The identify command with cns set to NVME_ID_CNS_NS_ACTIVE_LIST does not depend on the command set. The execution of this command should thus not look at the csi field specified in the command. Simplify nvmet_execute_identify() to directly call nvmet_execute_identify_nslist() without the csi switch-case. 
Fixes: ab5d0b38c047 ("nvmet: add Command Set Identifier support") Signed-off-by: Damien Le Moal Reviewed-by: Chaitanya Kulkarni Tested-by: Chaitanya Kulkarni Signed-off-by: Christoph Hellwig --- drivers/nvme/target/admin-cmd.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'drivers/nvme') diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index a747c69074da..93c441ab9f09 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -718,13 +718,8 @@ static void nvmet_execute_identify(struct nvmet_req *req) } break; case NVME_ID_CNS_NS_ACTIVE_LIST: - switch (req->cmd->identify.csi) { - case NVME_CSI_NVM: - return nvmet_execute_identify_nslist(req); - default: - break; - } - break; + nvmet_execute_identify_nslist(req); + return; case NVME_ID_CNS_NS_DESC_LIST: if (nvmet_handle_identify_desclist(req) == true) return; -- cgit v1.2.3 From a5a6ab0950b46ab1ef4a5c83c80234018b81b38a Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Wed, 15 Mar 2023 19:59:38 +0900 Subject: nvmet: fix I/O Command Set specific Identify Controller For an identify command with cns set to NVME_ID_CNS_CS_CTRL, the NVMe 2.0 specification states that: If the I/O Command Set specified by the CSI field does not have an Identify Controller data structure, then the controller shall return a zero filled data structure. If the host requests a data structure for an I/O Command Set that the controller does not support, the controller shall abort the command with a status code of Invalid Field in Command. However, the current implementation of this identify command in nvmet_execute_identify() only handles the ZNS command set, returning an error for the NVM command set, which is not compliant with the specifications as we do support this command set. Fix this by: 1) Renaming nvmet_execute_identify_cns_cs_ctrl() to nvmet_execute_identify_ctrl_zns() to continue handling the ZNS command set as is. 2) Introduce a nvmet_execute_identify_ctrl_ns() helper to handle the NVM command set, returning a zero filled nvme_id_ctrl_nvm data structure. 3) Modify nvmet_execute_identify() to call these helpers based on the csi specified, returning an error for unsupported command sets. 
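Put differently, the CSI dispatch for CNS 06h that the patch below implements is: NVM gets a zero-filled nvme_id_ctrl_nvm structure, ZNS gets the ZNS controller data when zoned block device support is built in, and any other CSI is aborted with Invalid Field in Command. A small illustrative model (CSI values per NVMe 2.0: NVM=0h, KV=1h, ZNS=2h; the names below are not the target code):

#include <stdio.h>

enum csi { CSI_NVM = 0, CSI_KV = 1, CSI_ZNS = 2 };  /* NVMe 2.0 CSI values */

/* Model of the CNS 06h dispatch: which data structure (if any) is returned. */
static const char *identify_cs_ctrl(enum csi csi, int zoned_support)
{
    switch (csi) {
    case CSI_NVM:
        return "zero-filled nvme_id_ctrl_nvm";
    case CSI_ZNS:
        if (zoned_support)
            return "nvme_id_ctrl_zns";
        break;
    default:
        break;
    }
    return "abort: Invalid Field in Command";
}

int main(void)
{
    printf("NVM: %s\n", identify_cs_ctrl(CSI_NVM, 1));
    printf("ZNS: %s\n", identify_cs_ctrl(CSI_ZNS, 1));
    printf("KV:  %s\n", identify_cs_ctrl(CSI_KV, 1));
    return 0;
}
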
Fixes: aaf2e048af27 ("nvmet: add ZBD over ZNS backend support") Signed-off-by: Damien Le Moal Reviewed-by: Chaitanya Kulkarni Tested-by: Chaitanya Kulkarni Signed-off-by: Christoph Hellwig --- drivers/nvme/target/admin-cmd.c | 22 ++++++++++++++++------ drivers/nvme/target/nvmet.h | 2 +- drivers/nvme/target/zns.c | 2 +- 3 files changed, 18 insertions(+), 8 deletions(-) (limited to 'drivers/nvme') diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 93c441ab9f09..5bfbf5c651db 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -685,6 +685,13 @@ static bool nvmet_handle_identify_desclist(struct nvmet_req *req) } } +static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req) +{ + /* Not supported: return zeroes */ + nvmet_req_complete(req, + nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm))); +} + static void nvmet_execute_identify(struct nvmet_req *req) { if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE)) @@ -708,13 +715,16 @@ static void nvmet_execute_identify(struct nvmet_req *req) nvmet_execute_identify_ctrl(req); return; case NVME_ID_CNS_CS_CTRL: - if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { - switch (req->cmd->identify.csi) { - case NVME_CSI_ZNS: - return nvmet_execute_identify_cns_cs_ctrl(req); - default: - break; + switch (req->cmd->identify.csi) { + case NVME_CSI_NVM: + nvmet_execute_identify_ctrl_nvm(req); + return; + case NVME_CSI_ZNS: + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { + nvmet_execute_identify_ctrl_zns(req); + return; } + break; } break; case NVME_ID_CNS_NS_ACTIVE_LIST: diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 89bedfcd974c..ed3786140965 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -581,7 +581,7 @@ bool nvmet_ns_revalidate(struct nvmet_ns *ns); u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts); bool nvmet_bdev_zns_enable(struct nvmet_ns *ns); -void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req); +void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req); void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req); void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req); void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req); diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c index 77ef0a86ff61..ae4d9d35e46a 100644 --- a/drivers/nvme/target/zns.c +++ b/drivers/nvme/target/zns.c @@ -70,7 +70,7 @@ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns) return true; } -void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req) +void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req) { u8 zasl = req->sq->ctrl->subsys->zasl; struct nvmet_ctrl *ctrl = req->sq->ctrl; -- cgit v1.2.3 From 145f0dbb8aac48f2e3abd4bd09582c78afef07a7 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Wed, 15 Mar 2023 19:59:39 +0900 Subject: nvmet: cleanup nvmet_execute_identify() Change the order of the cases in nvmet_execute_identify() main switch-case to match the NVMe 2.0 specification order as defined in table 273. This is also the increasing order of CNS values. While at it, for clarity, make it explicit that identify with cns set to NVME_ID_CNS_CS_NS does not support NVM command set specific data. No functional changes are introduced by this cleanup. 
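For orientation, the CNS values involved and the handler each one maps to after this reordering, in increasing CNS order (values per the NVMe 2.0 base specification; this is an illustrative user-space sketch, not the target code):

#include <stdio.h>
#include <stdint.h>

/* CNS values from the NVMe 2.0 base specification, in increasing order. */
enum {
    CNS_ID_NS          = 0x00, /* Identify Namespace */
    CNS_ID_CTRL        = 0x01, /* Identify Controller */
    CNS_NS_ACTIVE_LIST = 0x02, /* Active Namespace ID list */
    CNS_NS_DESC_LIST   = 0x03, /* Namespace Identification Descriptor list */
    CNS_CS_NS          = 0x05, /* I/O Command Set specific Identify Namespace */
    CNS_CS_CTRL        = 0x06, /* I/O Command Set specific Identify Controller */
};

static const char *cns_handler(uint8_t cns)
{
    switch (cns) {
    case CNS_ID_NS:          return "nvmet_execute_identify_ns";
    case CNS_ID_CTRL:        return "nvmet_execute_identify_ctrl";
    case CNS_NS_ACTIVE_LIST: return "nvmet_execute_identify_nslist";
    case CNS_NS_DESC_LIST:   return "nvmet_execute_identify_desclist";
    case CNS_CS_NS:          return "CSI-specific Identify Namespace (ZNS only)";
    case CNS_CS_CTRL:        return "CSI-specific Identify Controller (NVM or ZNS)";
    default:                 return "Invalid Field in Command";
    }
}

int main(void)
{
    for (uint8_t cns = 0; cns <= 0x06; cns++)
        printf("CNS 0x%02x -> %s\n", cns, cns_handler(cns));
    return 0;
}
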
Signed-off-by: Damien Le Moal Reviewed-by: Chaitanya Kulkarni Tested-by: Chaitanya Kulkarni Signed-off-by: Christoph Hellwig --- drivers/nvme/target/admin-cmd.c | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) (limited to 'drivers/nvme') diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 5bfbf5c651db..3c4f2ddc0960 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -701,19 +701,29 @@ static void nvmet_execute_identify(struct nvmet_req *req) case NVME_ID_CNS_NS: nvmet_execute_identify_ns(req); return; + case NVME_ID_CNS_CTRL: + nvmet_execute_identify_ctrl(req); + return; + case NVME_ID_CNS_NS_ACTIVE_LIST: + nvmet_execute_identify_nslist(req); + return; + case NVME_ID_CNS_NS_DESC_LIST: + if (nvmet_handle_identify_desclist(req) == true) + return; + break; case NVME_ID_CNS_CS_NS: - if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { - switch (req->cmd->identify.csi) { - case NVME_CSI_ZNS: - return nvmet_execute_identify_cns_cs_ns(req); - default: - break; + switch (req->cmd->identify.csi) { + case NVME_CSI_NVM: + /* Not supported */ + break; + case NVME_CSI_ZNS: + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { + nvmet_execute_identify_cns_cs_ns(req); + return; } + break; } break; - case NVME_ID_CNS_CTRL: - nvmet_execute_identify_ctrl(req); - return; case NVME_ID_CNS_CS_CTRL: switch (req->cmd->identify.csi) { case NVME_CSI_NVM: @@ -727,13 +737,6 @@ static void nvmet_execute_identify(struct nvmet_req *req) break; } break; - case NVME_ID_CNS_NS_ACTIVE_LIST: - nvmet_execute_identify_nslist(req); - return; - case NVME_ID_CNS_NS_DESC_LIST: - if (nvmet_handle_identify_desclist(req) == true) - return; - break; } nvmet_req_cns_error_complete(req); -- cgit v1.2.3 From 2f17f42c7f52a34abff2c92766806b58ea142c23 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 15 Mar 2023 15:13:35 +0100 Subject: nvmet: fix Identify Identification Descriptor List handling The Identification Descriptor List CNS value does not check the CSI value, so remove the code trying to handle it. 
Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Reviewed-by: Damien Le Moal --- drivers/nvme/target/admin-cmd.c | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) (limited to 'drivers/nvme') diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 3c4f2ddc0960..c0908031ce25 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -668,23 +668,6 @@ out: nvmet_req_complete(req, status); } -static bool nvmet_handle_identify_desclist(struct nvmet_req *req) -{ - switch (req->cmd->identify.csi) { - case NVME_CSI_NVM: - nvmet_execute_identify_desclist(req); - return true; - case NVME_CSI_ZNS: - if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { - nvmet_execute_identify_desclist(req); - return true; - } - return false; - default: - return false; - } -} - static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req) { /* Not supported: return zeroes */ @@ -708,8 +691,7 @@ static void nvmet_execute_identify(struct nvmet_req *req) nvmet_execute_identify_nslist(req); return; case NVME_ID_CNS_NS_DESC_LIST: - if (nvmet_handle_identify_desclist(req) == true) - return; + nvmet_execute_identify_desclist(req); break; case NVME_ID_CNS_CS_NS: switch (req->cmd->identify.csi) { -- cgit v1.2.3 From 932635356618c19317c5b306b320368525cd1961 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 15 Mar 2023 15:14:31 +0100 Subject: nvmet: rename nvmet_execute_identify_cns_cs_ns nvmet_execute_identify_ns_zns is a more descriptive name for the function handling the "I/O Command Set Specific Identify Namespace Data Structure for the Zoned Namespace Command Set". Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Reviewed-by: Damien Le Moal --- drivers/nvme/target/admin-cmd.c | 4 ++-- drivers/nvme/target/nvmet.h | 2 +- drivers/nvme/target/zns.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/nvme') diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index c0908031ce25..afdf69f92599 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -692,7 +692,7 @@ static void nvmet_execute_identify(struct nvmet_req *req) return; case NVME_ID_CNS_NS_DESC_LIST: nvmet_execute_identify_desclist(req); - break; + return; case NVME_ID_CNS_CS_NS: switch (req->cmd->identify.csi) { case NVME_CSI_NVM: @@ -700,7 +700,7 @@ static void nvmet_execute_identify(struct nvmet_req *req) break; case NVME_CSI_ZNS: if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { - nvmet_execute_identify_cns_cs_ns(req); + nvmet_execute_identify_ns_zns(req); return; } break; diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index ed3786140965..2c1522d4063b 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -582,7 +582,7 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts); bool nvmet_bdev_zns_enable(struct nvmet_ns *ns); void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req); -void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req); +void nvmet_execute_identify_ns_zns(struct nvmet_req *req); void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req); void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req); void nvmet_bdev_execute_zone_append(struct nvmet_req *req); diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c index ae4d9d35e46a..5b5c1e481722 100644 --- a/drivers/nvme/target/zns.c +++ b/drivers/nvme/target/zns.c @@ -95,7 +95,7 @@ out: nvmet_req_complete(req, status); } -void 
nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req) +void nvmet_execute_identify_ns_zns(struct nvmet_req *req) { struct nvme_id_ns_zns *id_zns = NULL; u64 zsze; -- cgit v1.2.3 From c5a9abfad9fb010c39defeee0c939bfa9430a5a3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 22 Mar 2023 09:08:59 +0100 Subject: nvmet: remove nvmet_req_cns_error_complete Just fold it into the only caller. Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg --- drivers/nvme/target/admin-cmd.c | 5 ++++- drivers/nvme/target/nvmet.h | 8 -------- 2 files changed, 4 insertions(+), 9 deletions(-) (limited to 'drivers/nvme') diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index afdf69f92599..39cb570f833d 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -721,7 +721,10 @@ static void nvmet_execute_identify(struct nvmet_req *req) break; } - nvmet_req_cns_error_complete(req); + pr_debug("unhandled identify cns %d on qid %d\n", + req->cmd->identify.cns, req->sq->qid); + req->error_loc = offsetof(struct nvme_identify, cns); + nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); } /* diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 2c1522d4063b..dc60a22646f7 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -687,14 +687,6 @@ static inline bool nvmet_use_inline_bvec(struct nvmet_req *req) req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC; } -static inline void nvmet_req_cns_error_complete(struct nvmet_req *req) -{ - pr_debug("unhandled identify cns %d on qid %d\n", - req->cmd->identify.cns, req->sq->qid); - req->error_loc = offsetof(struct nvme_identify, cns); - nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); -} - static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio) { if (bio != &req->b.inline_bio) -- cgit v1.2.3 From aeacfcefa218f4ed11da478e9b7915a37d1afaff Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 21 Mar 2023 09:30:25 -0700 Subject: nvme-tcp: fence TCP socket on receive error Ensure that no further socket reads occur after a receive processing error, either from io_work being re-scheduled or nvme_tcp_poll. Failing to do so can result in unrecognised PDU payloads or TCP stream garbage being processed as a C2H data PDU, and potentially start copying the payload to an invalid destination after looking up a request using a bogus command id. 
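The principle, in miniature: once the receive path hits a fatal parsing error it must stop consuming the byte stream entirely, since there is no way to resynchronize to PDU boundaries afterwards. A small user-space model of such a fence (the rd_enabled name mirrors the driver's flag; everything else is illustrative):

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

struct queue {
    bool rd_enabled;    /* cleared on a fatal receive/parsing error */
};

/* Model of the receive callback: bail out before touching the stream
 * if a previous pass already failed. */
static int recv_bytes(struct queue *q, const char *buf, int len)
{
    if (!q->rd_enabled)
        return -EFAULT; /* fence: never parse past an error */

    /* ... the normal PDU state machine would run here ... */
    (void)buf;
    return len;         /* pretend everything was consumed */
}

int main(void)
{
    struct queue q = { .rd_enabled = true };

    printf("first pass consumed: %d\n", recv_bytes(&q, "pdu", 3));
    q.rd_enabled = false;   /* a parsing error tears the queue down */
    printf("after error: %d (further reads are fenced off)\n",
           recv_bytes(&q, "garbage", 7));
    return 0;
}
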
Signed-off-by: Chris Leech Reviewed-by: Sagi Grimberg Reviewed-by: John Meneghini Signed-off-by: Christoph Hellwig --- drivers/nvme/host/tcp.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/nvme') diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 7723a4989524..273c1f2760a4 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -876,6 +876,9 @@ static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb, size_t consumed = len; int result; + if (unlikely(!queue->rd_enabled)) + return -EFAULT; + while (len) { switch (nvme_tcp_recv_state(queue)) { case NVME_TCP_RECV_PDU: -- cgit v1.2.3 From 44aef3b850757a371adde7e47c5c243d3988aa73 Mon Sep 17 00:00:00 2001 From: Chaitanya Kulkarni Date: Sun, 26 Mar 2023 22:37:23 -0700 Subject: nvmet-tcp: validate so_priority modparam value The module parameter so_priority is passed to the function sock_set_priority(), which has the following prototype and expects the priority argument to be of type u32: void sock_set_priority(struct sock *sk, u32 priority); Add a module parameter validation callback to reject any negative values for so_priority, as it is defined as an int. Use this opportunity to update the module parameter description and print the default value. Signed-off-by: Chaitanya Kulkarni Reviewed-by: Sagi Grimberg Signed-off-by: Christoph Hellwig --- drivers/nvme/target/tcp.c | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) (limited to 'drivers/nvme') diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 66e8f9fd0ca7..85ffcc8d0f99 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -20,6 +20,31 @@ #define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE) +static int param_store_val(const char *str, int *val, int min, int max) +{ + int ret, new_val; + + ret = kstrtoint(str, 10, &new_val); + if (ret) + return -EINVAL; + + if (new_val < min || new_val > max) + return -EINVAL; + + *val = new_val; + return 0; +} + +static int set_params(const char *str, const struct kernel_param *kp) +{ + return param_store_val(str, kp->arg, 0, INT_MAX); +} + +static const struct kernel_param_ops set_param_ops = { + .set = set_params, + .get = param_get_int, +}; + /* Define the socket priority to use for connections were it is desirable * that the NIC consider performing optimized packet processing or filtering. * A non-zero value being sufficient to indicate general consideration of any * values that may be unique for some NIC implementations. */ static int so_priority; -module_param(so_priority, int, 0644); -MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority"); +device_param_cb(so_priority, &set_param_ops, &so_priority, 0644); +MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority: Default 0"); /* Define a time period (in usecs) that io_work() shall sample an activated * queue before determining it to be idle. This optional module behavior -- cgit v1.2.3 From 6fe240bc0d97902ecebd811bb22bd85b36d75ee2 Mon Sep 17 00:00:00 2001 From: Chaitanya Kulkarni Date: Sun, 26 Mar 2023 22:37:24 -0700 Subject: nvmet-tcp: validate idle poll modparam value The module parameter idle_poll_period_usecs is passed to the function usecs_to_jiffies(), which has the following prototype and expects the idle_poll_period_usecs argument to be of type unsigned int: unsigned long usecs_to_jiffies(const unsigned int u); Use a similar module parameter validation callback as in the previous patch.
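In user space, the same validation reduces to parsing and range-checking before the value is stored. A self-contained analogue (strtol in place of kstrtoint; the helper name mirrors param_store_val from the patch above, everything else is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <errno.h>

/* Parse a decimal integer and reject anything outside [min, max],
 * mirroring the kernel-side param_store_val() added above. */
static int parse_ranged_int(const char *str, int *val, int min, int max)
{
    char *end;
    long new_val;

    errno = 0;
    new_val = strtol(str, &end, 10);
    if (errno || end == str || *end != '\0')
        return -EINVAL;
    if (new_val < min || new_val > max)
        return -EINVAL;

    *val = (int)new_val;
    return 0;
}

int main(void)
{
    int so_priority = 0;

    /* "-1" is now rejected instead of being stored as a negative priority. */
    printf("set 7:  %d\n", parse_ranged_int("7", &so_priority, 0, INT_MAX));
    printf("set -1: %d\n", parse_ranged_int("-1", &so_priority, 0, INT_MAX));
    printf("so_priority = %d\n", so_priority);
    return 0;
}
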
Signed-off-by: Chaitanya Kulkarni Reviewed-by: Sagi Grimberg Signed-off-by: Christoph Hellwig --- drivers/nvme/target/tcp.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/nvme') diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 85ffcc8d0f99..ed98df72c76b 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -61,9 +61,10 @@ MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority: Default 0"); * using advanced interrupt moderation techniques. */ static int idle_poll_period_usecs; -module_param(idle_poll_period_usecs, int, 0644); +device_param_cb(idle_poll_period_usecs, &set_param_ops, + &idle_poll_period_usecs, 0644); MODULE_PARM_DESC(idle_poll_period_usecs, - "nvmet tcp io_work poll till idle time period in usecs"); + "nvmet tcp io_work poll till idle time period in usecs: Default 0"); #define NVMET_TCP_RECV_BUDGET 8 #define NVMET_TCP_SEND_BUDGET 8 -- cgit v1.2.3 From 2ce525d40aa61c87884b100995e59ba68b4ea059 Mon Sep 17 00:00:00 2001 From: Chaitanya Kulkarni Date: Sun, 26 Mar 2023 22:48:37 -0700 Subject: nvme-apple: return directly instead of else There is no need for the else when direct return is used at the end of the function. Signed-off-by: Chaitanya Kulkarni Reviewed-by: Eric Curtin Signed-off-by: Christoph Hellwig --- drivers/nvme/host/apple.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/nvme') diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c index b317ce6c4ec3..484112f8cb27 100644 --- a/drivers/nvme/host/apple.c +++ b/drivers/nvme/host/apple.c @@ -209,8 +209,8 @@ static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q) { if (q->is_adminq) return container_of(q, struct apple_nvme, adminq); - else - return container_of(q, struct apple_nvme, ioq); + + return container_of(q, struct apple_nvme, ioq); } static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q) -- cgit v1.2.3 From cf806e3ab1c1878e357fd91205e020ea0dfdafe6 Mon Sep 17 00:00:00 2001 From: Chaitanya Kulkarni Date: Sun, 26 Mar 2023 22:48:38 -0700 Subject: nvme-apple: return directly instead of else There is no need for the else when direct return is used at the end of the function. Signed-off-by: Chaitanya Kulkarni Reviewed-by: Eric Curtin Signed-off-by: Christoph Hellwig --- drivers/nvme/host/apple.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/nvme') diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c index 484112f8cb27..596bb11eeba5 100644 --- a/drivers/nvme/host/apple.c +++ b/drivers/nvme/host/apple.c @@ -217,8 +217,8 @@ static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q) { if (q->is_adminq) return APPLE_NVME_AQ_DEPTH; - else - return APPLE_ANS_MAX_QUEUE_DEPTH; + + return APPLE_ANS_MAX_QUEUE_DEPTH; } static void apple_nvme_rtkit_crashed(void *cookie) -- cgit v1.2.3 From 6622b76fe922b94189499a90ccdb714a4a8d0773 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Wed, 5 Apr 2023 14:57:20 -0700 Subject: nvme: fix async event trace event Mixing AER Event Type and Event Info has masking clashes. Just print the event type, but also include the event info of the AER result in the trace. 
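For context on why a single symbolic map clashed: the AER completion result dword packs several fields. Per the NVMe specification, bits 2:0 carry the event type, bits 15:8 the event information, and bits 23:16 the log page identifier, so event-type and notice-subtype constants overlap numerically. A small user-space decoder (field layout per the specification; the example values are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Asynchronous Event Request completion dword 0 field layout (NVMe spec). */
static unsigned aer_type(uint32_t result)     { return result & 0x7; }
static unsigned aer_info(uint32_t result)     { return (result >> 8) & 0xff; }
static unsigned aer_log_page(uint32_t result) { return (result >> 16) & 0xff; }

int main(void)
{
    /* Example: a Notice (type 2) with Namespace Attribute Changed (info 0)
     * pointing at the Changed Namespace List log page (0x04). */
    uint32_t result = (0x04u << 16) | (0x00u << 8) | 0x2u;

    printf("result      = %#08x\n", result);
    printf("event type  = %u\n", aer_type(result));
    printf("event info  = %u\n", aer_info(result));
    printf("log page id = 0x%02x\n", aer_log_page(result));
    return 0;
}
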
Fixes: 09bd1ff4b15143b ("nvme-core: add async event trace helper") Reported-by: Nate Thornton Reviewed-by: Sagi Grimberg Reviewed-by: Minwoo Im Reviewed-by: Chaitanya Kulkarni Signed-off-by: Keith Busch Signed-off-by: Christoph Hellwig --- drivers/nvme/host/core.c | 5 +---- drivers/nvme/host/trace.h | 15 ++++++--------- 2 files changed, 7 insertions(+), 13 deletions(-) (limited to 'drivers/nvme') diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index c2730b116dc6..0699365461f4 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -4808,8 +4808,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) u32 aer_notice_type = nvme_aer_subtype(result); bool requeue = true; - trace_nvme_async_event(ctrl, aer_notice_type); - switch (aer_notice_type) { case NVME_AER_NOTICE_NS_CHANGED: set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events); @@ -4845,7 +4843,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl) { - trace_nvme_async_event(ctrl, NVME_AER_ERROR); dev_warn(ctrl->device, "resetting controller due to AER\n"); nvme_reset_ctrl(ctrl); } @@ -4861,6 +4858,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS) return; + trace_nvme_async_event(ctrl, result); switch (aer_type) { case NVME_AER_NOTICE: requeue = nvme_handle_aen_notice(ctrl, result); @@ -4878,7 +4876,6 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, case NVME_AER_SMART: case NVME_AER_CSS: case NVME_AER_VS: - trace_nvme_async_event(ctrl, aer_type); ctrl->aen_result = result; break; default: diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h index 6f0eaf6a1528..4fb5922ffdac 100644 --- a/drivers/nvme/host/trace.h +++ b/drivers/nvme/host/trace.h @@ -127,15 +127,12 @@ TRACE_EVENT(nvme_async_event, ), TP_printk("nvme%d: NVME_AEN=%#08x [%s]", __entry->ctrl_id, __entry->result, - __print_symbolic(__entry->result, - aer_name(NVME_AER_NOTICE_NS_CHANGED), - aer_name(NVME_AER_NOTICE_ANA), - aer_name(NVME_AER_NOTICE_FW_ACT_STARTING), - aer_name(NVME_AER_NOTICE_DISC_CHANGED), - aer_name(NVME_AER_ERROR), - aer_name(NVME_AER_SMART), - aer_name(NVME_AER_CSS), - aer_name(NVME_AER_VS)) + __print_symbolic(__entry->result & 0x7, + aer_name(NVME_AER_ERROR), + aer_name(NVME_AER_SMART), + aer_name(NVME_AER_NOTICE), + aer_name(NVME_AER_CSS), + aer_name(NVME_AER_VS)) ) ); -- cgit v1.2.3 From d4f1d5f7a4d8d998739c3d699476cd0247d580c6 Mon Sep 17 00:00:00 2001 From: Lei Yin Date: Thu, 6 Apr 2023 23:39:11 +0800 Subject: nvme: fix double blk_mq_complete_request for timeout request with low probability When nvme_cancel_tagset traverses all tagsets and executes nvme_cancel_request, the request may concurrently be going through blk_mq_free_request, called from nvme_rdma_complete_timed_out/nvme_tcp_complete_timed_out. Once blk_mq_free_request has executed WRITE_ONCE(rq->state, MQ_RQ_IDLE) and __blk_mq_free_request(rq), completing the request from the cancel path results in a double blk_mq_complete_request, and the second execution dereferences a NULL pointer because rq->mq_hctx was already set to NULL by the first one.
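The fix below comes down to which blk-mq request states the cancel path treats as safe to skip. An illustrative model using the three request states blk-mq defines (this is not driver code):

#include <stdio.h>
#include <stdbool.h>

/* The three blk-mq request states (mirroring enum mq_rq_state). */
enum mq_rq_state { MQ_RQ_IDLE, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE };

/* Old check: only already-completed requests were skipped, so a request
 * that had just been freed back to IDLE could still be completed again. */
static bool skip_old(enum mq_rq_state state)
{
    return state == MQ_RQ_COMPLETE;
}

/* New check: anything not in flight (idle or completed) is skipped. */
static bool skip_new(enum mq_rq_state state)
{
    return state != MQ_RQ_IN_FLIGHT;
}

int main(void)
{
    enum mq_rq_state s = MQ_RQ_IDLE;    /* freed by the timeout path */

    printf("old check skips idle request: %d\n", skip_old(s)); /* 0 -> double completion */
    printf("new check skips idle request: %d\n", skip_new(s)); /* 1 -> safe */
    return 0;
}
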
Signed-off-by: Lei Yin Reviewed-by: Keith Busch Reviewed-by: Sagi Grimberg Signed-off-by: Christoph Hellwig --- drivers/nvme/host/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/nvme') diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 0699365461f4..6c1e7d6709e0 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -450,8 +450,8 @@ bool nvme_cancel_request(struct request *req, void *data) dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device, "Cancelling I/O %d", req->tag); - /* don't abort one completed request */ - if (blk_mq_request_completed(req)) + /* don't abort one completed or idle request */ + if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) return true; nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; -- cgit v1.2.3 From 015ad2b1e4b9e04628b459ad8d5324673f6cbc9f Mon Sep 17 00:00:00 2001 From: zhenwei pi Date: Fri, 7 Apr 2023 17:15:57 +0800 Subject: nvme-rdma: minor cleanup in nvme_rdma_create_cq() Before cleanup: enum ib_poll_context poll_ctx; if (nvme_rdma_poll_queue(queue)) { poll_ctx = IB_POLL_DIRECT; queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size, comp_vector, poll_ctx); } else { poll_ctx = IB_POLL_SOFTIRQ; queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size, comp_vector, poll_ctx); } After cleanup: if (nvme_rdma_poll_queue(queue)) queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size, comp_vector, IB_POLL_DIRECT); else queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size, comp_vector, IB_POLL_SOFTIRQ); IB_POLL_DIRECT/IB_POLL_SOFTIRQ are now used directly in the function calls, which reads more clearly. Signed-off-by: zhenwei pi Reviewed-by: Sagi Grimberg Signed-off-by: Christoph Hellwig --- drivers/nvme/host/rdma.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'drivers/nvme') diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index bbad26b82b56..4767ae19178c 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -464,7 +464,6 @@ static int nvme_rdma_create_cq(struct ib_device *ibdev, struct nvme_rdma_queue *queue) { int ret, comp_vector, idx = nvme_rdma_queue_idx(queue); - enum ib_poll_context poll_ctx; /* * Spread I/O queues completion vectors according their queue index. @@ -473,15 +472,12 @@ static int nvme_rdma_create_cq(struct ib_device *ibdev, comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors; /* Polling queues need direct cq polling context */ - if (nvme_rdma_poll_queue(queue)) { - poll_ctx = IB_POLL_DIRECT; + if (nvme_rdma_poll_queue(queue)) queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size, - comp_vector, poll_ctx); - } else { - poll_ctx = IB_POLL_SOFTIRQ; + comp_vector, IB_POLL_DIRECT); + else queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size, - comp_vector, poll_ctx); - } + comp_vector, IB_POLL_SOFTIRQ); if (IS_ERR(queue->ib_cq)) { ret = PTR_ERR(queue->ib_cq); -- cgit v1.2.3 From edde9e70bb48301c853299f1bd7c0aa0745a38ea Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Wed, 22 Mar 2023 14:37:03 +0200 Subject: blk-mq-rdma: remove queue mapping helper for rdma devices No RDMA device exposes its IRQ vector affinity today, so the only mapping we have left is the default blk_mq_map_queues, which we fall back to anyway. Also fix up the only consumer of this helper (nvme-rdma). Remove this now-dead code.
Signed-off-by: Sagi Grimberg Acked-by: Keith Busch Signed-off-by: Christoph Hellwig --- block/Kconfig | 5 ----- block/Makefile | 1 - block/blk-mq-rdma.c | 44 -------------------------------------------- drivers/nvme/host/rdma.c | 7 ++----- include/linux/blk-mq-rdma.h | 11 ----------- 5 files changed, 2 insertions(+), 66 deletions(-) delete mode 100644 block/blk-mq-rdma.c delete mode 100644 include/linux/blk-mq-rdma.h (limited to 'drivers/nvme') diff --git a/block/Kconfig b/block/Kconfig index 5d9d9c84d516..0b7c13f9a089 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -215,11 +215,6 @@ config BLK_MQ_VIRTIO depends on VIRTIO default y -config BLK_MQ_RDMA - bool - depends on INFINIBAND - default y - config BLK_PM def_bool PM diff --git a/block/Makefile b/block/Makefile index 4e01bb71ad6e..b31b05390749 100644 --- a/block/Makefile +++ b/block/Makefile @@ -30,7 +30,6 @@ obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o obj-$(CONFIG_BLK_DEV_INTEGRITY_T10) += t10-pi.o obj-$(CONFIG_BLK_MQ_PCI) += blk-mq-pci.o obj-$(CONFIG_BLK_MQ_VIRTIO) += blk-mq-virtio.o -obj-$(CONFIG_BLK_MQ_RDMA) += blk-mq-rdma.o obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o obj-$(CONFIG_BLK_WBT) += blk-wbt.o obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o diff --git a/block/blk-mq-rdma.c b/block/blk-mq-rdma.c deleted file mode 100644 index 29c1f4d6eb04..000000000000 --- a/block/blk-mq-rdma.c +++ /dev/null @@ -1,44 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2017 Sagi Grimberg. - */ -#include -#include -#include - -/** - * blk_mq_rdma_map_queues - provide a default queue mapping for rdma device - * @map: CPU to hardware queue map. - * @dev: rdma device to provide a mapping for. - * @first_vec: first interrupt vectors to use for queues (usually 0) - * - * This function assumes the rdma device @dev has at least as many available - * interrupt vetors as @set has queues. It will then query it's affinity mask - * and built queue mapping that maps a queue to the CPUs that have irq affinity - * for the corresponding vector. - * - * In case either the driver passed a @dev with less vectors than - * @set->nr_hw_queues, or @dev does not provide an affinity mask for a - * vector, we fallback to the naive mapping. 
- */ -void blk_mq_rdma_map_queues(struct blk_mq_queue_map *map, - struct ib_device *dev, int first_vec) -{ - const struct cpumask *mask; - unsigned int queue, cpu; - - for (queue = 0; queue < map->nr_queues; queue++) { - mask = ib_get_vector_affinity(dev, first_vec + queue); - if (!mask) - goto fallback; - - for_each_cpu(cpu, mask) - map->mq_map[cpu] = map->queue_offset + queue; - } - - return; - -fallback: - blk_mq_map_queues(map); -} -EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues); diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 4767ae19178c..0eb79696fb73 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -2159,10 +2158,8 @@ static void nvme_rdma_map_queues(struct blk_mq_tag_set *set) ctrl->io_queues[HCTX_TYPE_DEFAULT]; set->map[HCTX_TYPE_READ].queue_offset = 0; } - blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT], - ctrl->device->dev, 0); - blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ], - ctrl->device->dev, 0); + blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); + blk_mq_map_queues(&set->map[HCTX_TYPE_READ]); if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) { /* map dedicated poll queues only if we have queues left */ diff --git a/include/linux/blk-mq-rdma.h b/include/linux/blk-mq-rdma.h deleted file mode 100644 index 53b58c610e76..000000000000 --- a/include/linux/blk-mq-rdma.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_BLK_MQ_RDMA_H -#define _LINUX_BLK_MQ_RDMA_H - -struct blk_mq_tag_set; -struct ib_device; - -void blk_mq_rdma_map_queues(struct blk_mq_queue_map *map, - struct ib_device *dev, int first_vec); - -#endif /* _LINUX_BLK_MQ_RDMA_H */ -- cgit v1.2.3 From 4f86a6ff6fbd891232dda3ca97fd1b9630b59809 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Wed, 12 Apr 2023 16:49:04 +0800 Subject: nvme-fcloop: fix "inconsistent {IN-HARDIRQ-W} -> {HARDIRQ-ON-W} usage" fcloop_fcp_op() could be called from flush request's ->end_io(flush_end_io) in which the spinlock of fq->mq_flush_lock is grabbed with irq saved/disabled. So fcloop_fcp_op() can't call spin_unlock_irq(&tfcp_req->reqlock) simply which enables irq unconditionally. Fixes the warning by switching to spin_lock_irqsave()/spin_unlock_irqrestore() Fixes: c38dbbfab1bc ("nvme-fcloop: fix inconsistent lock state warnings") Reported-by: Yi Zhang Signed-off-by: Ming Lei Reviewed-by: Ewan D. 
Milne Tested-by: Yi Zhang Signed-off-by: Christoph Hellwig --- drivers/nvme/target/fcloop.c | 48 +++++++++++++++++++++++++------------------- 1 file changed, 27 insertions(+), 21 deletions(-) (limited to 'drivers/nvme') diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 5c16372f3b53..c780af36c1d4 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -614,10 +614,11 @@ fcloop_fcp_recv_work(struct work_struct *work) struct fcloop_fcpreq *tfcp_req = container_of(work, struct fcloop_fcpreq, fcp_rcv_work); struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq; + unsigned long flags; int ret = 0; bool aborted = false; - spin_lock_irq(&tfcp_req->reqlock); + spin_lock_irqsave(&tfcp_req->reqlock, flags); switch (tfcp_req->inistate) { case INI_IO_START: tfcp_req->inistate = INI_IO_ACTIVE; @@ -626,11 +627,11 @@ fcloop_fcp_recv_work(struct work_struct *work) aborted = true; break; default: - spin_unlock_irq(&tfcp_req->reqlock); + spin_unlock_irqrestore(&tfcp_req->reqlock, flags); WARN_ON(1); return; } - spin_unlock_irq(&tfcp_req->reqlock); + spin_unlock_irqrestore(&tfcp_req->reqlock, flags); if (unlikely(aborted)) ret = -ECANCELED; @@ -655,8 +656,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work) container_of(work, struct fcloop_fcpreq, abort_rcv_work); struct nvmefc_fcp_req *fcpreq; bool completed = false; + unsigned long flags; - spin_lock_irq(&tfcp_req->reqlock); + spin_lock_irqsave(&tfcp_req->reqlock, flags); fcpreq = tfcp_req->fcpreq; switch (tfcp_req->inistate) { case INI_IO_ABORTED: @@ -665,11 +667,11 @@ fcloop_fcp_abort_recv_work(struct work_struct *work) completed = true; break; default: - spin_unlock_irq(&tfcp_req->reqlock); + spin_unlock_irqrestore(&tfcp_req->reqlock, flags); WARN_ON(1); return; } - spin_unlock_irq(&tfcp_req->reqlock); + spin_unlock_irqrestore(&tfcp_req->reqlock, flags); if (unlikely(completed)) { /* remove reference taken in original abort downcall */ @@ -681,9 +683,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work) nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport, &tfcp_req->tgt_fcp_req); - spin_lock_irq(&tfcp_req->reqlock); + spin_lock_irqsave(&tfcp_req->reqlock, flags); tfcp_req->fcpreq = NULL; - spin_unlock_irq(&tfcp_req->reqlock); + spin_unlock_irqrestore(&tfcp_req->reqlock, flags); fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED); /* call_host_done releases reference for abort downcall */ @@ -699,11 +701,12 @@ fcloop_tgt_fcprqst_done_work(struct work_struct *work) struct fcloop_fcpreq *tfcp_req = container_of(work, struct fcloop_fcpreq, tio_done_work); struct nvmefc_fcp_req *fcpreq; + unsigned long flags; - spin_lock_irq(&tfcp_req->reqlock); + spin_lock_irqsave(&tfcp_req->reqlock, flags); fcpreq = tfcp_req->fcpreq; tfcp_req->inistate = INI_IO_COMPLETED; - spin_unlock_irq(&tfcp_req->reqlock); + spin_unlock_irqrestore(&tfcp_req->reqlock, flags); fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status); } @@ -807,13 +810,14 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport, u32 rsplen = 0, xfrlen = 0; int fcp_err = 0, active, aborted; u8 op = tgt_fcpreq->op; + unsigned long flags; - spin_lock_irq(&tfcp_req->reqlock); + spin_lock_irqsave(&tfcp_req->reqlock, flags); fcpreq = tfcp_req->fcpreq; active = tfcp_req->active; aborted = tfcp_req->aborted; tfcp_req->active = true; - spin_unlock_irq(&tfcp_req->reqlock); + spin_unlock_irqrestore(&tfcp_req->reqlock, flags); if (unlikely(active)) /* illegal - call while i/o active */ @@ -821,9 +825,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport, if 
(unlikely(aborted)) { /* target transport has aborted i/o prior */ - spin_lock_irq(&tfcp_req->reqlock); + spin_lock_irqsave(&tfcp_req->reqlock, flags); tfcp_req->active = false; - spin_unlock_irq(&tfcp_req->reqlock); + spin_unlock_irqrestore(&tfcp_req->reqlock, flags); tgt_fcpreq->transferred_length = 0; tgt_fcpreq->fcp_error = -ECANCELED; tgt_fcpreq->done(tgt_fcpreq); @@ -880,9 +884,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport, break; } - spin_lock_irq(&tfcp_req->reqlock); + spin_lock_irqsave(&tfcp_req->reqlock, flags); tfcp_req->active = false; - spin_unlock_irq(&tfcp_req->reqlock); + spin_unlock_irqrestore(&tfcp_req->reqlock, flags); tgt_fcpreq->transferred_length = xfrlen; tgt_fcpreq->fcp_error = fcp_err; @@ -896,15 +900,16 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *tgt_fcpreq) { struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq); + unsigned long flags; /* * mark aborted only in case there were 2 threads in transport * (one doing io, other doing abort) and only kills ops posted * after the abort request */ - spin_lock_irq(&tfcp_req->reqlock); + spin_lock_irqsave(&tfcp_req->reqlock, flags); tfcp_req->aborted = true; - spin_unlock_irq(&tfcp_req->reqlock); + spin_unlock_irqrestore(&tfcp_req->reqlock, flags); tfcp_req->status = NVME_SC_INTERNAL; @@ -946,6 +951,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, struct fcloop_ini_fcpreq *inireq = fcpreq->private; struct fcloop_fcpreq *tfcp_req; bool abortio = true; + unsigned long flags; spin_lock(&inireq->inilock); tfcp_req = inireq->tfcp_req; @@ -958,7 +964,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, return; /* break initiator/target relationship for io */ - spin_lock_irq(&tfcp_req->reqlock); + spin_lock_irqsave(&tfcp_req->reqlock, flags); switch (tfcp_req->inistate) { case INI_IO_START: case INI_IO_ACTIVE: @@ -968,11 +974,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, abortio = false; break; default: - spin_unlock_irq(&tfcp_req->reqlock); + spin_unlock_irqrestore(&tfcp_req->reqlock, flags); WARN_ON(1); return; } - spin_unlock_irq(&tfcp_req->reqlock); + spin_unlock_irqrestore(&tfcp_req->reqlock, flags); if (abortio) /* leave the reference while the work item is scheduled */ -- cgit v1.2.3