author     James Smart <jsmart2021@gmail.com>  2018-03-01 01:49:11 +0300
committer  Jens Axboe <axboe@kernel.dk>        2018-03-26 17:53:43 +0300
commit     9d625f7792875e8119ac3f364f3fd71b8bfc1294 (patch)
tree       e130978c0a61fc3042c3d7e7064e73392f3a3b1f /drivers/nvme
parent     0cdd5fca876b1e9c56ca01186ba650b680248b35 (diff)
download   linux-9d625f7792875e8119ac3f364f3fd71b8bfc1294.tar.xz
nvmet_fc: prevent new io rqsts in possible isr completions
When a bio completion calls back into the transport for a back-end io device, the request completion path can free the transport io job structure, allowing it to be reused for other operations. The transport has a defer_rcv queue which holds temporary cmd rcv ops while waiting for io job structures. When the job frees, if there's a cmd waiting, it is picked up and submitted for processing, which can call back out to the bio path if it's a read. Unfortunately, the context of the original bio done call is unknown, and it may be in a state (softirq) that is not compatible with submitting the new bio in the same calling sequence. This is especially true when using scsi back-end devices, as scsi is in softirq when it makes the done call.

Correct by scheduling the io to be started via workq rather than calling the start-new-io path inline to the original bio done path.

Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
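The fix applies the standard workqueue deferral pattern: a completion callback that may run in softirq context only queues a work item, and the work handler, running in process context, starts the next io. Below is a minimal self-contained sketch of that pattern, not the nvmet_fc code itself; all identifiers (demo_ctx, demo_io_done, demo_defer_work, demo_init) are hypothetical.

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct demo_ctx {
		struct work_struct defer_work;
		/* ... per-io state ... */
	};

	/* Work handler: runs in process context, so it may sleep and may
	 * safely submit new io (e.g. a new bio).
	 */
	static void demo_defer_work(struct work_struct *work)
	{
		struct demo_ctx *ctx =
			container_of(work, struct demo_ctx, defer_work);

		pr_info("starting deferred io for %p\n", ctx);
		/* submit the next io here */
	}

	/* Completion callback: may be invoked in softirq (a scsi back
	 * end, for example, completes in softirq), so it must not start
	 * new io inline; it only schedules the work item.
	 */
	static void demo_io_done(struct demo_ctx *ctx)
	{
		queue_work(system_wq, &ctx->defer_work);
	}

	static void demo_init(struct demo_ctx *ctx)
	{
		INIT_WORK(&ctx->defer_work, demo_defer_work);
	}

The patch below follows this shape, queueing fod->defer_work on the queue's own workqueue (queue->work_q) rather than system_wq, so the deferred cmd is started outside the original bio done context.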
Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/target/fc.c | 19 ++++++++++++++++---
1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 9b39a6cb1935..9f80f98d81d2 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -87,6 +87,7 @@ struct nvmet_fc_fcp_iod {
struct nvmet_req req;
struct work_struct work;
struct work_struct done_work;
+ struct work_struct defer_work;
struct nvmet_fc_tgtport *tgtport;
struct nvmet_fc_tgt_queue *queue;
@@ -224,6 +225,7 @@ static DEFINE_IDA(nvmet_fc_tgtport_cnt);
static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
+static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
@@ -429,6 +431,7 @@ nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
for (i = 0; i < queue->sqsize; fod++, i++) {
INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
+ INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
fod->tgtport = tgtport;
fod->queue = queue;
fod->active = false;
@@ -512,6 +515,17 @@ nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
}
static void
+nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
+{
+ struct nvmet_fc_fcp_iod *fod =
+ container_of(work, struct nvmet_fc_fcp_iod, defer_work);
+
+ /* Submit deferred IO for processing */
+ nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
+
+}
+
+static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
struct nvmet_fc_fcp_iod *fod)
{
@@ -568,13 +582,12 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
/* inform LLDD IO is now being processed */
tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
- /* Submit deferred IO for processing */
- nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
-
/*
* Leave the queue lookup get reference taken when
* fod was originally allocated.
*/
+
+ queue_work(queue->work_q, &fod->defer_work);
}
static int