Diffstat (limited to 'drivers/nvme/host/fc.c')
-rw-r--r--	drivers/nvme/host/fc.c	95
1 file changed, 25 insertions(+), 70 deletions(-)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index fb22976a36a8..68a5d971657b 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -221,11 +221,6 @@ static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);
-static struct workqueue_struct *nvme_fc_wq;
-
-static bool nvme_fc_waiting_to_unload;
-static DECLARE_COMPLETION(nvme_fc_unload_proceed);
-
/*
* These items are short-term. They will eventually be moved into
* a generic FC class. See comments in module init.
@@ -255,8 +250,6 @@ nvme_fc_free_lport(struct kref *ref)
/* remove from transport list */
spin_lock_irqsave(&nvme_fc_lock, flags);
list_del(&lport->port_list);
- if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
- complete(&nvme_fc_unload_proceed);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
@@ -1218,10 +1211,10 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
/* Linux supports only Dynamic controllers */
assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
- strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
- min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
- strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
- min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
+ strscpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
+ sizeof(assoc_rqst->assoc_cmd.hostnqn));
+ strscpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
+ sizeof(assoc_rqst->assoc_cmd.subnqn));
lsop->queue = queue;
lsreq->rqstaddr = assoc_rqst;
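
Note on the strscpy() conversion: unlike strncpy(), strscpy() always NUL-terminates the destination and returns -E2BIG when the source had to be truncated, so sizing the copy with sizeof() of the destination field is safe even when the NQN string is longer than the FC-NVME field. A minimal sketch of the pattern, using a hypothetical structure rather than the driver's real association command:

	#include <linux/errno.h>
	#include <linux/printk.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* hypothetical stand-in for the FC-NVME association command */
	struct demo_assoc_cmd {
		char hostnqn[256];
	};

	static void demo_copy_hostnqn(struct demo_assoc_cmd *cmd, const char *nqn)
	{
		ssize_t ret;

		/*
		 * strscpy() copies at most sizeof(cmd->hostnqn) - 1 bytes and
		 * always writes a trailing NUL; strncpy() would leave the field
		 * unterminated whenever the source filled it completely.
		 */
		ret = strscpy(cmd->hostnqn, nqn, sizeof(cmd->hostnqn));
		if (ret == -E2BIG)
			pr_warn("host NQN truncated to %zu bytes\n",
				sizeof(cmd->hostnqn));
	}
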
@@ -2548,24 +2541,17 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
* the controller. Abort any ios on the association and let the
* create_association error path resolve things.
*/
- enum nvme_ctrl_state state;
- unsigned long flags;
-
- spin_lock_irqsave(&ctrl->lock, flags);
- state = ctrl->ctrl.state;
- if (state == NVME_CTRL_CONNECTING) {
- set_bit(ASSOC_FAILED, &ctrl->flags);
- spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
__nvme_fc_abort_outstanding_ios(ctrl, true);
+ set_bit(ASSOC_FAILED, &ctrl->flags);
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: transport error during (re)connect\n",
ctrl->cnum);
return;
}
- spin_unlock_irqrestore(&ctrl->lock, flags);
/* Otherwise, only proceed if in LIVE state - e.g. on first error */
- if (state != NVME_CTRL_LIVE)
+ if (ctrl->ctrl.state != NVME_CTRL_LIVE)
return;
dev_warn(ctrl->ctrl.device,
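
The ctrl->lock round trip disappears in this hunk because ASSOC_FAILED lives in an unsigned long manipulated with the atomic bit helpers and the controller state is only sampled once; set_bit()/test_bit() need no external lock, and the abort of outstanding I/O is now issued before the flag is raised. A reduced sketch of that handshake, with hypothetical names standing in for the driver's functions:

	#include <linux/bitops.h>
	#include <linux/errno.h>

	#define DEMO_ASSOC_FAILED	0	/* bit index, mirrors ASSOC_FAILED */

	struct demo_ctrl {
		unsigned long flags;
	};

	/* error path: tear down first, then publish the failure atomically */
	static void demo_error_recovery(struct demo_ctrl *ctrl)
	{
		/* ... abort outstanding I/O here ... */
		set_bit(DEMO_ASSOC_FAILED, &ctrl->flags);
	}

	/* (re)connect path: an unlocked test_bit() is enough to observe it */
	static int demo_create_association(struct demo_ctrl *ctrl)
	{
		if (test_bit(DEMO_ASSOC_FAILED, &ctrl->flags))
			return -EIO;
		return 0;
	}
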
@@ -2581,6 +2567,7 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
{
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
struct nvme_fc_ctrl *ctrl = op->ctrl;
+ u16 qnum = op->queue->qnum;
struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
struct nvme_command *sqe = &cmdiu->sqe;
@@ -2589,10 +2576,11 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
* will detect the aborted io and will fail the connection.
*/
dev_info(ctrl->ctrl.device,
- "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
+ "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d (%s) w10/11: "
"x%08x/x%08x\n",
- ctrl->cnum, op->queue->qnum, sqe->common.opcode,
- sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
+ ctrl->cnum, qnum, sqe->common.opcode, sqe->fabrics.fctype,
+ nvme_fabrics_opcode_str(qnum, sqe),
+ sqe->common.cdw10, sqe->common.cdw11);
if (__nvme_fc_abort_op(ctrl, op))
nvme_fc_error_recovery(ctrl, "io timeout abort failed");
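
nvme_fabrics_opcode_str() resolves the raw opcode/fctype pair into a human-readable name, so the timeout message now reads e.g. "... fctype 1 (Connect) ..." instead of bare numbers. The real helper is defined in the nvme host headers; the following is only a rough, hypothetical sketch of the kind of lookup it performs:

	#include <linux/nvme.h>

	/* hypothetical, reduced lookup; the in-tree helper covers every opcode */
	static const char *demo_fabrics_opcode_str(int qnum,
						    struct nvme_command *sqe)
	{
		if (nvme_is_fabrics(sqe)) {
			switch (sqe->fabrics.fctype) {
			case nvme_fabrics_type_connect:
				return "Connect";
			case nvme_fabrics_type_property_get:
				return "Property Get";
			case nvme_fabrics_type_property_set:
				return "Property Set";
			default:
				return "Unknown Fabrics Cmd";
			}
		}
		return qnum ? "I/O Cmd" : "Admin Cmd";
	}
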
@@ -3180,16 +3168,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
else
ret = nvme_fc_recreate_io_queues(ctrl);
}
-
- spin_lock_irqsave(&ctrl->lock, flags);
if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
ret = -EIO;
- if (ret) {
- spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (ret)
goto out_term_aen_ops;
- }
+
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
- spin_unlock_irqrestore(&ctrl->lock, flags);
ctrl->ctrl.nr_reconnects = 0;
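
This hunk is the consumer side of the same flag: because test_bit() on ASSOC_FAILED is an atomic read (see the sketch after the error-recovery hunk above), the spin_lock_irqsave()/spin_unlock_irqrestore() pair that used to bracket the check and the transition to NVME_CTRL_LIVE can be dropped, as the diff does.
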
@@ -3509,10 +3493,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->ctrl.opts = opts;
ctrl->ctrl.nr_reconnects = 0;
- if (lport->dev)
- ctrl->ctrl.numa_node = dev_to_node(lport->dev);
- else
- ctrl->ctrl.numa_node = NUMA_NO_NODE;
INIT_LIST_HEAD(&ctrl->ctrl_list);
ctrl->lport = lport;
ctrl->rport = rport;
@@ -3557,6 +3537,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
if (ret)
goto out_free_queues;
+ if (lport->dev)
+ ctrl->ctrl.numa_node = dev_to_node(lport->dev);
/* at this point, teardown path changes to ref counting on nvme ctrl */
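
Moving the NUMA node assignment below nvme_init_ctrl() matters because the core init path initializes ctrl->numa_node itself (to NUMA_NO_NODE); assigning it earlier would simply be overwritten, and with that default already in place the explicit else branch is redundant. A hypothetical sketch of the resulting ordering:

	#include <linux/device.h>
	#include <linux/numa.h>

	/*
	 * hypothetical sketch: the generic init picks the default, the
	 * transport refines it afterwards so the default cannot clobber it
	 */
	static int demo_pick_numa_node(struct device *hwdev)
	{
		int node = NUMA_NO_NODE;	/* what the core init leaves behind */

		if (hwdev)
			node = dev_to_node(hwdev);

		return node;
	}
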
@@ -3588,8 +3570,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
flush_delayed_work(&ctrl->connect_work);
dev_info(ctrl->ctrl.device,
- "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
- ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));
+ "NVME-FC{%d}: new ctrl: NQN \"%s\", hostnqn: %s\n",
+ ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl), opts->host->nqn);
return &ctrl->ctrl;
@@ -3907,10 +3889,6 @@ static int __init nvme_fc_init_module(void)
{
int ret;
- nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
- if (!nvme_fc_wq)
- return -ENOMEM;
-
/*
* NOTE:
* It is expected that in the future the kernel will combine
@@ -3928,7 +3906,7 @@ static int __init nvme_fc_init_module(void)
ret = class_register(&fc_class);
if (ret) {
pr_err("couldn't register class fc\n");
- goto out_destroy_wq;
+ return ret;
}
/*
@@ -3952,8 +3930,6 @@ out_destroy_device:
device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
class_unregister(&fc_class);
-out_destroy_wq:
- destroy_workqueue(nvme_fc_wq);
return ret;
}
@@ -3973,48 +3949,27 @@ nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
spin_unlock(&rport->lock);
}
-static void
-nvme_fc_cleanup_for_unload(void)
+static void __exit nvme_fc_exit_module(void)
{
struct nvme_fc_lport *lport;
struct nvme_fc_rport *rport;
-
- list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
- list_for_each_entry(rport, &lport->endp_list, endp_list) {
- nvme_fc_delete_controllers(rport);
- }
- }
-}
-
-static void __exit nvme_fc_exit_module(void)
-{
unsigned long flags;
- bool need_cleanup = false;
spin_lock_irqsave(&nvme_fc_lock, flags);
- nvme_fc_waiting_to_unload = true;
- if (!list_empty(&nvme_fc_lport_list)) {
- need_cleanup = true;
- nvme_fc_cleanup_for_unload();
- }
+ list_for_each_entry(lport, &nvme_fc_lport_list, port_list)
+ list_for_each_entry(rport, &lport->endp_list, endp_list)
+ nvme_fc_delete_controllers(rport);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
- if (need_cleanup) {
- pr_info("%s: waiting for ctlr deletes\n", __func__);
- wait_for_completion(&nvme_fc_unload_proceed);
- pr_info("%s: ctrl deletes complete\n", __func__);
- }
+ flush_workqueue(nvme_delete_wq);
nvmf_unregister_transport(&nvme_fc_transport);
- ida_destroy(&nvme_fc_local_port_cnt);
- ida_destroy(&nvme_fc_ctrl_cnt);
-
device_destroy(&fc_class, MKDEV(0, 0));
class_unregister(&fc_class);
- destroy_workqueue(nvme_fc_wq);
}
module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);
+MODULE_DESCRIPTION("NVMe host FC transport driver");
MODULE_LICENSE("GPL v2");
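
The unload path no longer needs the private nvme_fc_wq workqueue or the nvme_fc_unload_proceed completion: the controller deletions triggered by nvme_fc_delete_controllers() are queued on the core nvme_delete_wq, and a single flush_workqueue(nvme_delete_wq) waits for every deletion queued before it to finish, after which the transport can be unregistered. The generic queue-then-flush pattern, shown with hypothetical work items:

	#include <linux/errno.h>
	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *demo_wq;	/* stands in for nvme_delete_wq */

	static void demo_delete_fn(struct work_struct *work)
	{
		/* per-controller teardown would run here */
	}

	static DECLARE_WORK(demo_delete_work, demo_delete_fn);

	static int __init demo_init(void)
	{
		demo_wq = alloc_workqueue("demo_wq", WQ_UNBOUND, 0);
		return demo_wq ? 0 : -ENOMEM;
	}

	static void __exit demo_exit(void)
	{
		/* queue the deletions, as nvme_fc_delete_controllers() does ... */
		queue_work(demo_wq, &demo_delete_work);

		/* ... then wait: returns once every previously queued item ran */
		flush_workqueue(demo_wq);
		destroy_workqueue(demo_wq);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_DESCRIPTION("workqueue flush-on-unload sketch");
	MODULE_LICENSE("GPL");
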