Diffstat (limited to 'drivers/nvme/host/core.c')
-rw-r--r--  drivers/nvme/host/core.c | 132
1 file changed, 92 insertions, 40 deletions
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index ce1b61519441..e68a8c4ac5a6 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -179,7 +179,7 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
int ret;
@@ -192,7 +192,6 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
return ret;
}
-EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
@@ -280,14 +279,13 @@ static blk_status_t nvme_error_status(u16 status)
static void nvme_retry_req(struct request *req)
{
- struct nvme_ns *ns = req->q->queuedata;
unsigned long delay = 0;
u16 crd;
/* The mask and shift result must be <= 3 */
crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
- if (ns && crd)
- delay = ns->ctrl->crdt[crd - 1] * 100;
+ if (crd)
+ delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
nvme_req(req)->retries++;
blk_mq_requeue_request(req, false);
@@ -331,7 +329,7 @@ static inline void nvme_end_req(struct request *req)
req->__sector = nvme_lba_to_sect(req->q->queuedata,
le64_to_cpu(nvme_req(req)->result.u64));
- nvme_trace_bio_complete(req, status);
+ nvme_trace_bio_complete(req);
blk_mq_end_request(req, status);
}
@@ -357,6 +355,21 @@ void nvme_complete_rq(struct request *req)
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
+/*
+ * Called to unwind from ->queue_rq on a failed command submission so that the
+ * multipathing code gets called to potentially failover to another path.
+ * The caller needs to unwind all transport specific resource allocations and
+ * must propagate the return value.
+ */
+blk_status_t nvme_host_path_error(struct request *req)
+{
+ nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
+ blk_mq_set_request_complete(req);
+ nvme_complete_rq(req);
+ return BLK_STS_OK;
+}
+EXPORT_SYMBOL_GPL(nvme_host_path_error);
+
bool nvme_cancel_request(struct request *req, void *data, bool reserved)
{
dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
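Not part of the patch: a minimal sketch of how a transport's ->queue_rq error
path might use the new nvme_host_path_error() helper; my_transport_send() and
my_transport_unwind() are hypothetical stand-ins for the driver's own
submission and resource-unwind code.

static blk_status_t my_transport_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;

	blk_mq_start_request(req);
	if (my_transport_send(req)) {		/* hypothetical: nonzero on failure */
		my_transport_unwind(req);	/* hypothetical: release transport resources */
		/* report a host path error so the multipath code can fail over */
		return nvme_host_path_error(req);
	}
	return BLK_STS_OK;
}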
@@ -372,6 +385,26 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
+void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->tagset) {
+ blk_mq_tagset_busy_iter(ctrl->tagset,
+ nvme_cancel_request, ctrl);
+ blk_mq_tagset_wait_completed_request(ctrl->tagset);
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
+
+void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->admin_tagset) {
+ blk_mq_tagset_busy_iter(ctrl->admin_tagset,
+ nvme_cancel_request, ctrl);
+ blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
+
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state)
{
@@ -578,7 +611,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);
-struct request *nvme_alloc_request_qid(struct request_queue *q,
+static struct request *nvme_alloc_request_qid(struct request_queue *q,
struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
struct request *req;
@@ -589,7 +622,6 @@ struct request *nvme_alloc_request_qid(struct request_queue *q,
nvme_init_request(req, cmd);
return req;
}
-EXPORT_SYMBOL_GPL(nvme_alloc_request_qid);
static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
@@ -844,11 +876,11 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
void nvme_cleanup_cmd(struct request *req)
{
if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
- struct nvme_ns *ns = req->rq_disk->private_data;
+ struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
struct page *page = req->special_vec.bv_page;
- if (page == ns->ctrl->discard_page)
- clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
+ if (page == ctrl->discard_page)
+ clear_bit_unlock(0, &ctrl->discard_page_busy);
else
kfree(page_address(page) + req->special_vec.bv_offset);
}
@@ -927,7 +959,7 @@ static void nvme_execute_rq_polled(struct request_queue *q,
rq->cmd_flags |= REQ_HIPRI;
rq->end_io_data = &wait;
- blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);
+ blk_execute_rq_nowait(bd_disk, rq, at_head, nvme_end_sync_rq);
while (!completion_done(&wait)) {
blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
@@ -966,7 +998,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
if (poll)
nvme_execute_rq_polled(req->q, NULL, req, at_head);
else
- blk_execute_rq(req->q, NULL, req, at_head);
+ blk_execute_rq(NULL, req, at_head);
if (result)
*result = nvme_req(req)->result;
if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
@@ -1103,7 +1135,7 @@ void nvme_execute_passthru_rq(struct request *rq)
u32 effects;
effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
- blk_execute_rq(rq->q, disk, rq, 0);
+ blk_execute_rq(disk, rq, 0);
nvme_passthru_end(ctrl, effects);
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
@@ -1115,7 +1147,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
{
bool write = nvme_is_write(cmd);
struct nvme_ns *ns = q->queuedata;
- struct gendisk *disk = ns ? ns->disk : NULL;
+ struct block_device *bdev = ns ? ns->disk->part0 : NULL;
struct request *req;
struct bio *bio = NULL;
void *meta = NULL;
@@ -1135,8 +1167,9 @@ static int nvme_submit_user_cmd(struct request_queue *q,
if (ret)
goto out;
bio = req->bio;
- bio->bi_disk = disk;
- if (disk && meta_buffer && meta_len) {
+ if (bdev)
+ bio_set_dev(bio, bdev);
+ if (bdev && meta_buffer && meta_len) {
meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
meta_seed, write);
if (IS_ERR(meta)) {
@@ -1204,7 +1237,7 @@ static int nvme_keep_alive(struct nvme_ctrl *ctrl)
rq->timeout = ctrl->kato * HZ;
rq->end_io_data = ctrl;
- blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
+ blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io);
return 0;
}
@@ -1545,8 +1578,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
}
length = (io.nblocks + 1) << ns->lba_shift;
- meta_len = (io.nblocks + 1) * ns->ms;
- metadata = nvme_to_user_ptr(io.metadata);
+
+ if ((io.control & NVME_RW_PRINFO_PRACT) &&
+ ns->ms == sizeof(struct t10_pi_tuple)) {
+ /*
+ * Protection information is stripped/inserted by the
+ * controller.
+ */
+ if (nvme_to_user_ptr(io.metadata))
+ return -EINVAL;
+ meta_len = 0;
+ metadata = NULL;
+ } else {
+ meta_len = (io.nblocks + 1) * ns->ms;
+ metadata = nvme_to_user_ptr(io.metadata);
+ }
if (ns->features & NVME_NS_EXT_LBAS) {
length += meta_len;
@@ -2114,9 +2160,8 @@ static void nvme_update_disk_info(struct gendisk *disk,
nvme_config_discard(disk, ns);
nvme_config_write_zeroes(disk, ns);
- if ((id->nsattr & NVME_NS_ATTR_RO) ||
- test_bit(NVME_NS_FORCE_RO, &ns->flags))
- set_disk_ro(disk, true);
+ set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||
+ test_bit(NVME_NS_FORCE_RO, &ns->flags));
}
static inline bool nvme_first_scan(struct gendisk *disk)
@@ -2165,17 +2210,18 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
ns->lba_shift = id->lbaf[lbaf].ds;
nvme_set_queue_limits(ns->ctrl, ns->queue);
+ ret = nvme_configure_metadata(ns, id);
+ if (ret)
+ goto out_unfreeze;
+ nvme_set_chunk_sectors(ns, id);
+ nvme_update_disk_info(ns->disk, ns, id);
+
if (ns->head->ids.csi == NVME_CSI_ZNS) {
ret = nvme_update_zone_info(ns, lbaf);
if (ret)
goto out_unfreeze;
}
- ret = nvme_configure_metadata(ns, id);
- if (ret)
- goto out_unfreeze;
- nvme_set_chunk_sectors(ns, id);
- nvme_update_disk_info(ns->disk, ns, id);
blk_mq_unfreeze_queue(ns->disk->queue);
if (blk_queue_is_zoned(ns->queue)) {
@@ -2819,7 +2865,7 @@ static ssize_t nvme_subsys_show_nqn(struct device *dev,
struct nvme_subsystem *subsys =
container_of(dev, struct nvme_subsystem, dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
+ return sysfs_emit(buf, "%s\n", subsys->subnqn);
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
@@ -2849,7 +2895,7 @@ static struct attribute *nvme_subsys_attrs[] = {
NULL,
};
-static struct attribute_group nvme_subsys_attrs_group = {
+static const struct attribute_group nvme_subsys_attrs_group = {
.attrs = nvme_subsys_attrs,
};
@@ -2858,6 +2904,11 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
NULL,
};
+static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
+{
+ return ctrl->opts && ctrl->opts->discovery_nqn;
+}
+
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
@@ -2877,7 +2928,7 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
}
if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
- (ctrl->opts && ctrl->opts->discovery_nqn))
+ nvme_discovery_ctrl(ctrl))
continue;
dev_err(ctrl->device,
@@ -3146,7 +3197,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
goto out_free;
}
- if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
+ if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
dev_err(ctrl->device,
"keep-alive support is mandatory for fabrics\n");
ret = -EINVAL;
@@ -3186,7 +3237,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
if (ret < 0)
return ret;
- if (!ctrl->identified) {
+ if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
ret = nvme_hwmon_init(ctrl);
if (ret < 0)
return ret;
@@ -3507,7 +3558,7 @@ static ssize_t nvme_sysfs_show_transport(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
+ return sysfs_emit(buf, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
@@ -3541,7 +3592,7 @@ static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
+ return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
@@ -3551,7 +3602,7 @@ static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn);
+ return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
}
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
@@ -3561,7 +3612,7 @@ static ssize_t nvme_sysfs_show_hostid(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id);
+ return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
}
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
@@ -3679,7 +3730,7 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
return a->mode;
}
-static struct attribute_group nvme_dev_attrs_group = {
+static const struct attribute_group nvme_dev_attrs_group = {
.attrs = nvme_dev_attrs,
.is_visible = nvme_dev_attrs_are_visible,
};
@@ -3813,7 +3864,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
}
}
- list_add_tail(&ns->siblings, &head->list);
+ list_add_tail_rcu(&ns->siblings, &head->list);
ns->head = head;
mutex_unlock(&ctrl->subsys->lock);
return 0;
@@ -4422,6 +4473,7 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
+ nvme_hwmon_exit(ctrl);
nvme_fault_inject_fini(&ctrl->fault_inject);
dev_pm_qos_hide_latency_tolerance(ctrl->device);
cdev_device_del(&ctrl->cdev, ctrl->device);
@@ -4434,7 +4486,7 @@ static void nvme_free_cels(struct nvme_ctrl *ctrl)
struct nvme_effects_log *cel;
unsigned long i;
- xa_for_each (&ctrl->cels, i, cel) {
+ xa_for_each(&ctrl->cels, i, cel) {
xa_erase(&ctrl->cels, i);
kfree(cel);
}