author     Christoph Hellwig <hch@lst.de>    2015-10-22 15:03:35 +0300
committer  Jens Axboe <axboe@fb.com>         2015-12-22 19:38:33 +0300
commit     31c7c7d2c9f17dc98a98c59c17e184bf164ee760
tree       658e3a50b133a92780d4c5b662773e6f7145cfd5 /drivers/nvme
parent     4c9f748f0ee88447b28546991f60f43a7319aafd
download   linux-31c7c7d2c9f17dc98a98c59c17e184bf164ee760.tar.xz
nvme: merge nvme_abort_req and nvme_timeout
We want to be able to return better error values from nvme_timeout, which is
significantly easier if the two functions are merged.  Also clean up and
reduce the printk spew so that we only get one message per abort.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
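[Editor's note, not part of the commit message: a minimal sketch of the blk-mq
timeout hook that the merged function now implements directly.  The callback
signature and the blk_eh_timer_return values are the ones from this kernel era;
example_timeout is a made-up name used only for illustration.]

/*
 * Illustrative sketch, not from this patch: the shape of a blk-mq
 * timeout callback.  Returning BLK_EH_RESET_TIMER re-arms the request
 * timer, so the same request can time out a second time, at which
 * point a driver can escalate its recovery (e.g. schedule a reset).
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static enum blk_eh_timer_return example_timeout(struct request *req,
						bool reserved)
{
	/* first expiry: start light-weight recovery (e.g. send an abort) */
	/* then ask the block layer to wait for another timeout period */
	return BLK_EH_RESET_TIMER;
}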
Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/host/pci.c | 47
1 file changed, 18 insertions(+), 29 deletions(-)
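[Editor's note: for context, an abridged sketch of where the timeout hook is
registered.  In the driver's pci.c of this era, both the admin and I/O queue
blk_mq_ops point .timeout at nvme_timeout; the struct below is illustrative
only, with its member list trimmed, and is not part of this patch.]

static struct blk_mq_ops nvme_mq_ops_sketch = {
	.queue_rq	= nvme_queue_rq,
	.timeout	= nvme_timeout,	/* the merged handler from this patch */
	/* remaining ops omitted in this sketch */
};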
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d4fef8190093..99c5b6319d8d 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1078,13 +1078,7 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}
-/**
- * nvme_abort_req - Attempt aborting a request
- *
- * Schedule controller reset if the command was already aborted once before and
- * still hasn't been returned to the driver, or if this is the admin queue.
- */
-static void nvme_abort_req(struct request *req)
+static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
{
struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = cmd_rq->nvmeq;
@@ -1093,6 +1087,11 @@ static void nvme_abort_req(struct request *req)
struct nvme_cmd_info *abort_cmd;
struct nvme_command cmd;
+ /*
+ * Schedule controller reset if the command was already aborted once
+ * before and still hasn't been returned to the driver, or if this is
+ * the admin queue.
+ */
if (!nvmeq->qid || cmd_rq->aborted) {
spin_lock_irq(&dev_list_lock);
if (!__nvme_reset(dev)) {
@@ -1101,16 +1100,16 @@ static void nvme_abort_req(struct request *req)
req->tag, nvmeq->qid);
}
spin_unlock_irq(&dev_list_lock);
- return;
+ return BLK_EH_RESET_TIMER;
}
if (!dev->ctrl.abort_limit)
- return;
+ return BLK_EH_RESET_TIMER;
abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, WRITE,
BLK_MQ_REQ_NOWAIT);
if (IS_ERR(abort_req))
- return;
+ return BLK_EH_RESET_TIMER;
abort_cmd = blk_mq_rq_to_pdu(abort_req);
nvme_set_info(abort_cmd, abort_req, abort_completion);
@@ -1124,9 +1123,16 @@ static void nvme_abort_req(struct request *req)
--dev->ctrl.abort_limit;
cmd_rq->aborted = 1;
- dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag,
- nvmeq->qid);
+ dev_warn(nvmeq->q_dmadev, "I/O %d QID %d timeout, aborting\n",
+ req->tag, nvmeq->qid);
nvme_submit_cmd(dev->queues[0], &cmd);
+
+ /*
+ * The aborted req will be completed on receiving the abort req.
+ * We enable the timer again. If hit twice, it'll cause a device reset,
+ * as the device then is in a faulty state.
+ */
+ return BLK_EH_RESET_TIMER;
}
static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
@@ -1157,23 +1163,6 @@ static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved
fn(nvmeq, ctx, &cqe);
}
-static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
-{
- struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
- struct nvme_queue *nvmeq = cmd->nvmeq;
-
- dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
- nvmeq->qid);
- nvme_abort_req(req);
-
- /*
- * The aborted req will be completed on receiving the abort req.
- * We enable the timer again. If hit twice, it'll cause a device reset,
- * as the device then is in a faulty state.
- */
- return BLK_EH_RESET_TIMER;
-}
-
static void nvme_free_queue(struct nvme_queue *nvmeq)
{
dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),