author     Jens Axboe <axboe@kernel.dk>  2021-04-22 19:23:55 +0300
committer  Jens Axboe <axboe@kernel.dk>  2021-04-22 19:23:55 +0300
commit     87d9ad028975e8f47a980fffa9196b426f69f258 (patch)
tree       63ee3b5b5913eb1927d0bda21a451156b26fa3fc
parent     f4be591f1436afff4a18ddd180f7bf9421ffddfe (diff)
parent     2637baed78010eeaae274feb5b99ce90933fadfb (diff)
download   linux-87d9ad028975e8f47a980fffa9196b426f69f258.tar.xz
Merge tag 'nvme-5.13-2021-04-22' of git://git.infradead.org/nvme into for-5.13/drivers
Pull NVMe updates from Christoph:

"- add support for a per-namespace character device (Minwoo Im)
 - various KATO fixes and cleanups (Hou Pu, Hannes Reinecke)
 - APST fix and cleanup"

* tag 'nvme-5.13-2021-04-22' of git://git.infradead.org/nvme:
  nvme: introduce generic per-namespace chardev
  nvme: cleanup nvme_configure_apst
  nvme: do not try to reconfigure APST when the controller is not live
  nvme: add 'kato' sysfs attribute
  nvme: sanitize KATO setting
  nvmet: avoid queuing keep-alive timer if it is disabled
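The per-namespace character devices added by this series appear as /dev/ng<ctrl>n<ns>, matching the "ng%dn%d" naming in nvme_add_ns_cdev() below. A minimal userspace sketch of exercising one, assuming a node named /dev/ng0n1 exists and using the existing NVME_IOCTL_ID ioctl from <linux/nvme_ioctl.h>:

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/nvme_ioctl.h>

        int main(void)
        {
                /* Generic char node: usable even when the ns has no block device. */
                int fd = open("/dev/ng0n1", O_RDONLY);

                if (fd < 0) {
                        perror("open /dev/ng0n1");
                        return 1;
                }
                /* Routed through nvme_ns_chr_ioctl() -> nvme_ns_ioctl() in this series. */
                int nsid = ioctl(fd, NVME_IOCTL_ID);

                printf("nsid: %d\n", nsid);
                close(fd);
                return 0;
        }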
-rw-r--r--  drivers/nvme/host/core.c          258
-rw-r--r--  drivers/nvme/host/fabrics.c         4
-rw-r--r--  drivers/nvme/host/ioctl.c          38
-rw-r--r--  drivers/nvme/host/multipath.c      51
-rw-r--r--  drivers/nvme/host/nvme.h           14
-rw-r--r--  drivers/nvme/target/admin-cmd.c    10
6 files changed, 276 insertions(+), 99 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 40f08e6325ef..2f45e8fcdd7c 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -89,6 +89,10 @@ static dev_t nvme_ctrl_base_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;
+static DEFINE_IDA(nvme_ns_chr_minor_ida);
+static dev_t nvme_ns_chr_devt;
+static struct class *nvme_ns_chr_class;
+
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
unsigned nsid);
@@ -1109,6 +1113,17 @@ void nvme_execute_passthru_rq(struct request *rq)
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
+/*
+ * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
+ *
+ * The host should send Keep Alive commands at half of the Keep Alive Timeout
+ * accounting for transport roundtrip times [..].
+ */
+static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
+{
+ queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
+}
+
static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
struct nvme_ctrl *ctrl = rq->end_io_data;
@@ -1131,7 +1146,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
startka = true;
spin_unlock_irqrestore(&ctrl->lock, flags);
if (startka)
- queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ nvme_queue_keep_alive_work(ctrl);
}
static int nvme_keep_alive(struct nvme_ctrl *ctrl)
@@ -1161,7 +1176,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
dev_dbg(ctrl->device,
"reschedule traffic based keep-alive timer\n");
ctrl->comp_seen = false;
- queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ nvme_queue_keep_alive_work(ctrl);
return;
}
@@ -1178,7 +1193,7 @@ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
if (unlikely(ctrl->kato == 0))
return;
- queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ nvme_queue_keep_alive_work(ctrl);
}
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
@@ -2170,28 +2185,28 @@ static int nvme_configure_acre(struct nvme_ctrl *ctrl)
return ret;
}
+/*
+ * APST (Autonomous Power State Transition) lets us program a table of power
+ * state transitions that the controller will perform automatically.
+ * We configure it with a simple heuristic: we are willing to spend at most 2%
+ * of the time transitioning between power states. Therefore, when running in
+ * any given state, we will enter the next lower-power non-operational state
+ * after waiting 50 * (enlat + exlat) microseconds, as long as that state's exit
+ * latency is under the requested maximum latency.
+ *
+ * We will not autonomously enter any non-operational state for which the total
+ * latency exceeds ps_max_latency_us.
+ *
+ * Users can set ps_max_latency_us to zero to turn off APST.
+ */
static int nvme_configure_apst(struct nvme_ctrl *ctrl)
{
- /*
- * APST (Autonomous Power State Transition) lets us program a
- * table of power state transitions that the controller will
- * perform automatically. We configure it with a simple
- * heuristic: we are willing to spend at most 2% of the time
- * transitioning between power states. Therefore, when running
- * in any given state, we will enter the next lower-power
- * non-operational state after waiting 50 * (enlat + exlat)
- * microseconds, as long as that state's exit latency is under
- * the requested maximum latency.
- *
- * We will not autonomously enter any non-operational state for
- * which the total latency exceeds ps_max_latency_us. Users
- * can set ps_max_latency_us to zero to turn off APST.
- */
-
- unsigned apste;
struct nvme_feat_auto_pst *table;
+ unsigned apste = 0;
u64 max_lat_us = 0;
+ __le64 target = 0;
int max_ps = -1;
+ int state;
int ret;
/*
@@ -2212,83 +2227,72 @@ static int nvme_configure_apst(struct nvme_ctrl *ctrl)
if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
/* Turn off APST. */
- apste = 0;
dev_dbg(ctrl->device, "APST disabled\n");
- } else {
- __le64 target = cpu_to_le64(0);
- int state;
-
- /*
- * Walk through all states from lowest- to highest-power.
- * According to the spec, lower-numbered states use more
- * power. NPSS, despite the name, is the index of the
- * lowest-power state, not the number of states.
- */
- for (state = (int)ctrl->npss; state >= 0; state--) {
- u64 total_latency_us, exit_latency_us, transition_ms;
-
- if (target)
- table->entries[state] = target;
-
- /*
- * Don't allow transitions to the deepest state
- * if it's quirked off.
- */
- if (state == ctrl->npss &&
- (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
- continue;
-
- /*
- * Is this state a useful non-operational state for
- * higher-power states to autonomously transition to?
- */
- if (!(ctrl->psd[state].flags &
- NVME_PS_FLAGS_NON_OP_STATE))
- continue;
-
- exit_latency_us =
- (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
- if (exit_latency_us > ctrl->ps_max_latency_us)
- continue;
+ goto done;
+ }
- total_latency_us =
- exit_latency_us +
- le32_to_cpu(ctrl->psd[state].entry_lat);
+ /*
+ * Walk through all states from lowest- to highest-power.
+ * According to the spec, lower-numbered states use more power. NPSS,
+ * despite the name, is the index of the lowest-power state, not the
+ * number of states.
+ */
+ for (state = (int)ctrl->npss; state >= 0; state--) {
+ u64 total_latency_us, exit_latency_us, transition_ms;
- /*
- * This state is good. Use it as the APST idle
- * target for higher power states.
- */
- transition_ms = total_latency_us + 19;
- do_div(transition_ms, 20);
- if (transition_ms > (1 << 24) - 1)
- transition_ms = (1 << 24) - 1;
+ if (target)
+ table->entries[state] = target;
- target = cpu_to_le64((state << 3) |
- (transition_ms << 8));
+ /*
+ * Don't allow transitions to the deepest state if it's quirked
+ * off.
+ */
+ if (state == ctrl->npss &&
+ (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
+ continue;
- if (max_ps == -1)
- max_ps = state;
+ /*
+ * Is this state a useful non-operational state for higher-power
+ * states to autonomously transition to?
+ */
+ if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE))
+ continue;
- if (total_latency_us > max_lat_us)
- max_lat_us = total_latency_us;
- }
+ exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
+ if (exit_latency_us > ctrl->ps_max_latency_us)
+ continue;
- apste = 1;
+ total_latency_us = exit_latency_us +
+ le32_to_cpu(ctrl->psd[state].entry_lat);
- if (max_ps == -1) {
- dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
- } else {
- dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
- max_ps, max_lat_us, (int)sizeof(*table), table);
- }
+ /*
+ * This state is good. Use it as the APST idle target for
+ * higher power states.
+ */
+ transition_ms = total_latency_us + 19;
+ do_div(transition_ms, 20);
+ if (transition_ms > (1 << 24) - 1)
+ transition_ms = (1 << 24) - 1;
+
+ target = cpu_to_le64((state << 3) | (transition_ms << 8));
+ if (max_ps == -1)
+ max_ps = state;
+ if (total_latency_us > max_lat_us)
+ max_lat_us = total_latency_us;
}
+ if (max_ps == -1)
+ dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
+ else
+ dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
+ max_ps, max_lat_us, (int)sizeof(*table), table);
+ apste = 1;
+
+done:
ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
table, sizeof(*table), NULL);
if (ret)
dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
-
kfree(table);
return ret;
}
@@ -2310,7 +2314,8 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val)
if (ctrl->ps_max_latency_us != latency) {
ctrl->ps_max_latency_us = latency;
- nvme_configure_apst(ctrl);
+ if (ctrl->state == NVME_CTRL_LIVE)
+ nvme_configure_apst(ctrl);
}
}
@@ -3161,6 +3166,7 @@ nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
+nvme_show_int_function(kato);
static ssize_t nvme_sysfs_delete(struct device *dev,
struct device_attribute *attr, const char *buf,
@@ -3358,6 +3364,7 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_ctrl_loss_tmo.attr,
&dev_attr_reconnect_delay.attr,
&dev_attr_fast_io_fail_tmo.attr,
+ &dev_attr_kato.attr,
NULL
};
@@ -3426,6 +3433,66 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys,
return 0;
}
+void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
+{
+ cdev_device_del(cdev, cdev_device);
+ ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(cdev_device->devt));
+}
+
+int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
+ const struct file_operations *fops, struct module *owner)
+{
+ int minor, ret;
+
+ minor = ida_simple_get(&nvme_ns_chr_minor_ida, 0, 0, GFP_KERNEL);
+ if (minor < 0)
+ return minor;
+ cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
+ cdev_device->class = nvme_ns_chr_class;
+ device_initialize(cdev_device);
+ cdev_init(cdev, fops);
+ cdev->owner = owner;
+ ret = cdev_device_add(cdev, cdev_device);
+ if (ret)
+ ida_simple_remove(&nvme_ns_chr_minor_ida, minor);
+ return ret;
+}
+
+static int nvme_ns_chr_open(struct inode *inode, struct file *file)
+{
+ return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev));
+}
+
+static int nvme_ns_chr_release(struct inode *inode, struct file *file)
+{
+ nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev));
+ return 0;
+}
+
+static const struct file_operations nvme_ns_chr_fops = {
+ .owner = THIS_MODULE,
+ .open = nvme_ns_chr_open,
+ .release = nvme_ns_chr_release,
+ .unlocked_ioctl = nvme_ns_chr_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+};
+
+static int nvme_add_ns_cdev(struct nvme_ns *ns)
+{
+ int ret;
+
+ ns->cdev_device.parent = ns->ctrl->device;
+ ret = dev_set_name(&ns->cdev_device, "ng%dn%d",
+ ns->ctrl->instance, ns->head->instance);
+ if (ret)
+ return ret;
+ ret = nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
+ ns->ctrl->ops->module);
+ if (ret)
+ kfree_const(ns->cdev_device.kobj.name);
+ return ret;
+}
+
static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
unsigned nsid, struct nvme_ns_ids *ids)
{
@@ -3627,6 +3694,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
nvme_get_ctrl(ctrl);
device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
+ if (!nvme_ns_head_multipath(ns->head))
+ nvme_add_ns_cdev(ns);
nvme_mpath_add_disk(ns, id);
nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
@@ -3671,6 +3740,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
if (ns->disk->flags & GENHD_FL_UP) {
+ if (!nvme_ns_head_multipath(ns->head))
+ nvme_cdev_del(&ns->cdev, &ns->cdev_device);
del_gendisk(ns->disk);
blk_cleanup_queue(ns->queue);
if (blk_get_integrity(ns->disk))
@@ -4461,8 +4532,24 @@ static int __init nvme_core_init(void)
result = PTR_ERR(nvme_subsys_class);
goto destroy_class;
}
+
+ result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS,
+ "nvme-generic");
+ if (result < 0)
+ goto destroy_subsys_class;
+
+ nvme_ns_chr_class = class_create(THIS_MODULE, "nvme-generic");
+ if (IS_ERR(nvme_ns_chr_class)) {
+ result = PTR_ERR(nvme_ns_chr_class);
+ goto unregister_generic_ns;
+ }
+
return 0;
+unregister_generic_ns:
+ unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
+destroy_subsys_class:
+ class_destroy(nvme_subsys_class);
destroy_class:
class_destroy(nvme_class);
unregister_chrdev:
@@ -4479,12 +4566,15 @@ out:
static void __exit nvme_core_exit(void)
{
+ class_destroy(nvme_ns_chr_class);
class_destroy(nvme_subsys_class);
class_destroy(nvme_class);
+ unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_workqueue(nvme_delete_wq);
destroy_workqueue(nvme_reset_wq);
destroy_workqueue(nvme_wq);
+ ida_destroy(&nvme_ns_chr_minor_ida);
ida_destroy(&nvme_instance_ida);
}
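To make the APST heuristic documented above concrete: the idle timer is 50 * (enlat + exlat), which the code computes as total_latency_us rounded up to milliseconds via (x + 19) / 20, capped at 2^24 - 1, then packed as (state << 3) | (transition_ms << 8). A standalone sketch of that entry arithmetic; the latency values are illustrative, not from a real controller:

        #include <stdint.h>
        #include <stdio.h>

        static uint64_t apst_entry(int state, uint64_t enlat_us, uint64_t exlat_us)
        {
                uint64_t total_latency_us = enlat_us + exlat_us;
                /* Round up to ms; equals 50 * (enlat + exlat) expressed in us. */
                uint64_t transition_ms = (total_latency_us + 19) / 20;

                if (transition_ms > (1 << 24) - 1)
                        transition_ms = (1 << 24) - 1;
                /* Idle target state in ITPS, idle timer in ITPT, as in the diff. */
                return ((uint64_t)state << 3) | (transition_ms << 8);
        }

        int main(void)
        {
                /* e.g. PS4 with enlat = 2000us, exlat = 8000us -> 500ms idle timer */
                printf("entry = 0x%llx\n",
                       (unsigned long long)apst_entry(4, 2000, 8000));
                return 0;
        }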
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 604ab0e5a2ad..13c2747e3d00 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -379,10 +379,8 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
/*
* Set keep-alive timeout in seconds granularity (ms * 1000)
- * and add a grace period for controller kato enforcement
*/
- cmd.connect.kato = ctrl->kato ?
- cpu_to_le32((ctrl->kato + NVME_KATO_GRACE) * 1000) : 0;
+ cmd.connect.kato = cpu_to_le32(ctrl->kato * 1000);
if (ctrl->opts->disable_sqflow)
cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
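Worked numbers for the sanitized KATO setting, assuming the default kato of 5 seconds: the connect command now advertises exactly 5000 ms (the old +NVME_KATO_GRACE padding is gone), and nvme_queue_keep_alive_work() in core.c fires a Keep Alive every kato/2 = 2.5 s, per the NVMe 1.4 recommendation quoted there. A minimal sketch of the arithmetic; the HZ value is illustrative and kernel-config dependent:

        #include <stdio.h>

        #define HZ 250  /* illustrative; depends on kernel configuration */

        int main(void)
        {
                unsigned int kato = 5;                   /* NVME_DEFAULT_KATO, seconds */
                unsigned int wire_ms = kato * 1000;      /* cmd.connect.kato */
                unsigned int ka_jiffies = kato * HZ / 2; /* nvme_queue_keep_alive_work() */

                printf("connect kato: %u ms\n", wire_ms);          /* 5000 */
                printf("keep-alive period: %u jiffies (%.1f s)\n",
                       ka_jiffies, (double)ka_jiffies / HZ);       /* 2.5 s */
                return 0;
        }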
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 8e05d65c9e93..502f8e4a2a1f 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -346,15 +346,27 @@ static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
}
}
+static int __nvme_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *arg)
+{
+ if (is_ctrl_ioctl(cmd))
+ return nvme_ctrl_ioctl(ns->ctrl, cmd, arg);
+ return nvme_ns_ioctl(ns, cmd, arg);
+}
+
int nvme_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct nvme_ns *ns = bdev->bd_disk->private_data;
- void __user *argp = (void __user *)arg;
- if (is_ctrl_ioctl(cmd))
- return nvme_ctrl_ioctl(ns->ctrl, cmd, argp);
- return nvme_ns_ioctl(ns, cmd, argp);
+ return __nvme_ioctl(ns, cmd, (void __user *)arg);
+}
+
+long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct nvme_ns *ns =
+ container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);
+
+ return __nvme_ioctl(ns, cmd, (void __user *)arg);
}
#ifdef CONFIG_NVME_MULTIPATH
@@ -388,10 +400,24 @@ int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct nvme_ns_head *head = bdev->bd_disk->private_data;
+ void __user *argp = (void __user *)arg;
+
+ if (is_ctrl_ioctl(cmd))
+ return nvme_ns_head_ctrl_ioctl(head, cmd, argp);
+ return nvme_ns_head_ns_ioctl(head, cmd, argp);
+}
+
+long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct cdev *cdev = file_inode(file)->i_cdev;
+ struct nvme_ns_head *head =
+ container_of(cdev, struct nvme_ns_head, cdev);
+ void __user *argp = (void __user *)arg;
if (is_ctrl_ioctl(cmd))
- return nvme_ns_head_ctrl_ioctl(head, cmd, (void __user *)arg);
- return nvme_ns_head_ns_ioctl(head, cmd, (void __user *)arg);
+ return nvme_ns_head_ctrl_ioctl(head, cmd, argp);
+ return nvme_ns_head_ns_ioctl(head, cmd, argp);
}
#endif /* CONFIG_NVME_MULTIPATH */
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 68918ea1d3d0..0d0de3433f37 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -357,6 +357,48 @@ const struct block_device_operations nvme_ns_head_ops = {
.pr_ops = &nvme_pr_ops,
};
+static inline struct nvme_ns_head *cdev_to_ns_head(struct cdev *cdev)
+{
+ return container_of(cdev, struct nvme_ns_head, cdev);
+}
+
+static int nvme_ns_head_chr_open(struct inode *inode, struct file *file)
+{
+ if (!nvme_tryget_ns_head(cdev_to_ns_head(inode->i_cdev)))
+ return -ENXIO;
+ return 0;
+}
+
+static int nvme_ns_head_chr_release(struct inode *inode, struct file *file)
+{
+ nvme_put_ns_head(cdev_to_ns_head(inode->i_cdev));
+ return 0;
+}
+
+static const struct file_operations nvme_ns_head_chr_fops = {
+ .owner = THIS_MODULE,
+ .open = nvme_ns_head_chr_open,
+ .release = nvme_ns_head_chr_release,
+ .unlocked_ioctl = nvme_ns_head_chr_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+};
+
+static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
+{
+ int ret;
+
+ head->cdev_device.parent = &head->subsys->dev;
+ ret = dev_set_name(&head->cdev_device, "ng%dn%d",
+ head->subsys->instance, head->instance);
+ if (ret)
+ return ret;
+ ret = nvme_cdev_add(&head->cdev, &head->cdev_device,
+ &nvme_ns_head_chr_fops, THIS_MODULE);
+ if (ret)
+ kfree_const(head->cdev_device.kobj.name);
+ return ret;
+}
+
static void nvme_requeue_work(struct work_struct *work)
{
struct nvme_ns_head *head =
@@ -435,9 +477,11 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
if (!head->disk)
return;
- if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
+ if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
device_add_disk(&head->subsys->dev, head->disk,
nvme_ns_id_attr_groups);
+ nvme_add_ns_head_cdev(head);
+ }
mutex_lock(&head->lock);
if (nvme_path_is_optimized(ns)) {
@@ -714,8 +758,10 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
if (!head->disk)
return;
- if (head->disk->flags & GENHD_FL_UP)
+ if (head->disk->flags & GENHD_FL_UP) {
+ nvme_cdev_del(&head->cdev, &head->cdev_device);
del_gendisk(head->disk);
+ }
blk_set_queue_dying(head->disk->queue);
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
@@ -785,4 +831,3 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
kfree(ctrl->ana_log_buf);
ctrl->ana_log_buf = NULL;
}
-
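The open/release paths above lean on container_of() to recover the enclosing nvme_ns_head from the struct cdev embedded in it (cdev_to_ns_head()). A self-contained illustration of that idiom, with the macro re-defined for userspace and a stand-in struct; demonstration only:

        #include <stddef.h>
        #include <stdio.h>

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct cdev { int dummy; };     /* stand-in for the kernel struct */

        struct ns_head_demo {
                int instance;
                struct cdev cdev;       /* embedded, as in struct nvme_ns_head */
        };

        int main(void)
        {
                struct ns_head_demo head = { .instance = 3 };
                struct cdev *cd = &head.cdev;  /* what inode->i_cdev would point at */
                struct ns_head_demo *h = container_of(cd, struct ns_head_demo, cdev);

                printf("instance = %d\n", h->instance);  /* prints 3 */
                return 0;
        }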
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index c6102ce83bb4..773dde5b231d 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -27,7 +27,6 @@ extern unsigned int admin_timeout;
#define NVME_ADMIN_TIMEOUT (admin_timeout * HZ)
#define NVME_DEFAULT_KATO 5
-#define NVME_KATO_GRACE 10
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define NVME_INLINE_SG_CNT 0
@@ -413,6 +412,10 @@ struct nvme_ns_head {
bool shared;
int instance;
struct nvme_effects_log *effects;
+
+ struct cdev cdev;
+ struct device cdev_device;
+
struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
struct bio_list requeue_list;
@@ -465,6 +468,9 @@ struct nvme_ns {
#define NVME_NS_ANA_PENDING 2
#define NVME_NS_FORCE_RO 3
+ struct cdev cdev;
+ struct device cdev_device;
+
struct nvme_fault_inject fault_inject;
};
@@ -659,10 +665,16 @@ void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
struct nvme_ctrl *nvme_find_get_live_ctrl(struct nvme_subsystem *subsys);
+int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
+ const struct file_operations *fops, struct module *owner);
+void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device);
int nvme_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
+long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
+long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index f4cc32674edd..d2a26ff3f7b3 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -919,15 +919,21 @@ void nvmet_execute_async_event(struct nvmet_req *req)
void nvmet_execute_keep_alive(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u16 status = 0;
if (!nvmet_check_transfer_len(req, 0))
return;
+ if (!ctrl->kato) {
+ status = NVME_SC_KA_TIMEOUT_INVALID;
+ goto out;
+ }
+
pr_debug("ctrl %d update keep-alive timer for %d secs\n",
ctrl->cntlid, ctrl->kato);
-
mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
- nvmet_req_complete(req, 0);
+out:
+ nvmet_req_complete(req, status);
}
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
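Host-visible effect of the nvmet change above, as a hedged sketch: against a controller that negotiated kato = 0, a Keep Alive (admin opcode 0x18) sent through the admin passthru ioctl should now complete with Keep Alive Timeout Invalid status rather than quietly re-arming a disabled timer. The device path is illustrative:

        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/nvme_ioctl.h>

        int main(void)
        {
                struct nvme_admin_cmd cmd;
                int fd = open("/dev/nvme0", O_RDONLY);

                if (fd < 0) {
                        perror("open /dev/nvme0");
                        return 1;
                }
                memset(&cmd, 0, sizeof(cmd));
                cmd.opcode = 0x18;      /* nvme_admin_keep_alive */
                /* Returns the NVMe status: nonzero if the target rejects it. */
                int status = ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);

                printf("keep-alive status: 0x%x\n", status);
                close(fd);
                return 0;
        }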