Diffstat (limited to 'drivers/nvme/host/core.c')
-rw-r--r-- | drivers/nvme/host/core.c | 1308 |
1 file changed, 829 insertions(+), 479 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index f3a01b79148c..ba6508455e18 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -4,6 +4,7 @@ * Copyright (c) 2011-2014, Intel Corporation. */ +#include <linux/async.h> #include <linux/blkdev.h> #include <linux/blk-mq.h> #include <linux/blk-integrity.h> @@ -20,6 +21,7 @@ #include <linux/ptrace.h> #include <linux/nvme_ioctl.h> #include <linux/pm_qos.h> +#include <linux/ratelimit.h> #include <asm/unaligned.h> #include "nvme.h" @@ -35,6 +37,7 @@ struct nvme_ns_info { struct nvme_ns_ids ids; u32 nsid; __le32 anagrpid; + u8 pi_offset; bool is_shared; bool is_readonly; bool is_ready; @@ -109,16 +112,25 @@ struct workqueue_struct *nvme_delete_wq; EXPORT_SYMBOL_GPL(nvme_delete_wq); static LIST_HEAD(nvme_subsystems); -static DEFINE_MUTEX(nvme_subsystems_lock); +DEFINE_MUTEX(nvme_subsystems_lock); static DEFINE_IDA(nvme_instance_ida); static dev_t nvme_ctrl_base_chr_devt; -static struct class *nvme_class; -static struct class *nvme_subsys_class; +static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env); +static const struct class nvme_class = { + .name = "nvme", + .dev_uevent = nvme_class_uevent, +}; + +static const struct class nvme_subsys_class = { + .name = "nvme-subsystem", +}; static DEFINE_IDA(nvme_ns_chr_minor_ida); static dev_t nvme_ns_chr_devt; -static struct class *nvme_ns_chr_class; +static const struct class nvme_ns_chr_class = { + .name = "nvme-generic", +}; static void nvme_put_subsystem(struct nvme_subsystem *subsys); static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, @@ -131,7 +143,7 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl) /* * Only new queue scan work when admin and IO queues are both alive */ - if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset) + if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset) queue_work(nvme_wq, &ctrl->scan_work); } @@ -143,7 +155,7 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl) */ int nvme_try_sched_reset(struct nvme_ctrl *ctrl) { - if (ctrl->state != NVME_CTRL_RESETTING) + if (nvme_ctrl_state(ctrl) != NVME_CTRL_RESETTING) return -EBUSY; if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) return -EBUSY; @@ -156,7 +168,7 @@ static void nvme_failfast_work(struct work_struct *work) struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), struct nvme_ctrl, failfast_work); - if (ctrl->state != NVME_CTRL_CONNECTING) + if (nvme_ctrl_state(ctrl) != NVME_CTRL_CONNECTING) return; set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); @@ -200,7 +212,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) ret = nvme_reset_ctrl(ctrl); if (!ret) { flush_work(&ctrl->reset_work); - if (ctrl->state != NVME_CTRL_LIVE) + if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) ret = -ENETRESET; } @@ -251,7 +263,7 @@ void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) static blk_status_t nvme_error_status(u16 status) { - switch (status & 0x7ff) { + switch (status & NVME_SCT_SC_MASK) { case NVME_SC_SUCCESS: return BLK_STS_OK; case NVME_SC_CAP_EXCEEDED: @@ -297,7 +309,7 @@ static void nvme_retry_req(struct request *req) u16 crd; /* The mask and shift result must be <= 3 */ - crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11; + crd = (nvme_req(req)->status & NVME_STATUS_CRD) >> 11; if (crd) delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100; @@ -312,17 +324,17 @@ static void nvme_log_error(struct request *req) struct nvme_request *nr = nvme_req(req); if (ns) { - pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n", 
+ pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %u blocks, %s (sct 0x%x / sc 0x%x) %s%s\n", ns->disk ? ns->disk->disk_name : "?", nvme_get_opcode_str(nr->cmd->common.opcode), nr->cmd->common.opcode, - (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)), - (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift, + nvme_sect_to_lba(ns->head, blk_rq_pos(req)), + blk_rq_bytes(req) >> ns->head->lba_shift, nvme_get_error_status_str(nr->status), - nr->status >> 8 & 7, /* Status Code Type */ - nr->status & 0xff, /* Status Code */ - nr->status & NVME_SC_MORE ? "MORE " : "", - nr->status & NVME_SC_DNR ? "DNR " : ""); + NVME_SCT(nr->status), /* Status Code Type */ + nr->status & NVME_SC_MASK, /* Status Code */ + nr->status & NVME_STATUS_MORE ? "MORE " : "", + nr->status & NVME_STATUS_DNR ? "DNR " : ""); return; } @@ -331,10 +343,34 @@ static void nvme_log_error(struct request *req) nvme_get_admin_opcode_str(nr->cmd->common.opcode), nr->cmd->common.opcode, nvme_get_error_status_str(nr->status), - nr->status >> 8 & 7, /* Status Code Type */ - nr->status & 0xff, /* Status Code */ - nr->status & NVME_SC_MORE ? "MORE " : "", - nr->status & NVME_SC_DNR ? "DNR " : ""); + NVME_SCT(nr->status), /* Status Code Type */ + nr->status & NVME_SC_MASK, /* Status Code */ + nr->status & NVME_STATUS_MORE ? "MORE " : "", + nr->status & NVME_STATUS_DNR ? "DNR " : ""); +} + +static void nvme_log_err_passthru(struct request *req) +{ + struct nvme_ns *ns = req->q->queuedata; + struct nvme_request *nr = nvme_req(req); + + pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s" + "cdw10=0x%x cdw11=0x%x cdw12=0x%x cdw13=0x%x cdw14=0x%x cdw15=0x%x\n", + ns ? ns->disk->disk_name : dev_name(nr->ctrl->device), + ns ? nvme_get_opcode_str(nr->cmd->common.opcode) : + nvme_get_admin_opcode_str(nr->cmd->common.opcode), + nr->cmd->common.opcode, + nvme_get_error_status_str(nr->status), + NVME_SCT(nr->status), /* Status Code Type */ + nr->status & NVME_SC_MASK, /* Status Code */ + nr->status & NVME_STATUS_MORE ? "MORE " : "", + nr->status & NVME_STATUS_DNR ? 
"DNR " : "", + nr->cmd->common.cdw10, + nr->cmd->common.cdw11, + nr->cmd->common.cdw12, + nr->cmd->common.cdw13, + nr->cmd->common.cdw14, + nr->cmd->common.cdw14); } enum nvme_disposition { @@ -349,14 +385,14 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req) if (likely(nvme_req(req)->status == 0)) return COMPLETE; - if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED) - return AUTHENTICATE; - if (blk_noretry_request(req) || - (nvme_req(req)->status & NVME_SC_DNR) || + (nvme_req(req)->status & NVME_STATUS_DNR) || nvme_req(req)->retries >= nvme_max_retries) return COMPLETE; + if ((nvme_req(req)->status & NVME_SCT_SC_MASK) == NVME_SC_AUTH_REQUIRED) + return AUTHENTICATE; + if (req->cmd_flags & REQ_NVME_MPATH) { if (nvme_is_path_error(nvme_req(req)->status) || blk_queue_dying(req->q)) @@ -372,21 +408,33 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req) static inline void nvme_end_req_zoned(struct request *req) { if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && - req_op(req) == REQ_OP_ZONE_APPEND) - req->__sector = nvme_lba_to_sect(req->q->queuedata, + req_op(req) == REQ_OP_ZONE_APPEND) { + struct nvme_ns *ns = req->q->queuedata; + + req->__sector = nvme_lba_to_sect(ns->head, le64_to_cpu(nvme_req(req)->result.u64)); + } } -static inline void nvme_end_req(struct request *req) +static inline void __nvme_end_req(struct request *req) { - blk_status_t status = nvme_error_status(nvme_req(req)->status); - - if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) - nvme_log_error(req); nvme_end_req_zoned(req); nvme_trace_bio_complete(req); if (req->cmd_flags & REQ_NVME_MPATH) nvme_mpath_end_request(req); +} + +void nvme_end_req(struct request *req) +{ + blk_status_t status = nvme_error_status(nvme_req(req)->status); + + if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) { + if (blk_rq_is_passthrough(req)) + nvme_log_err_passthru(req); + else + nvme_log_error(req); + } + __nvme_end_req(req); blk_mq_end_request(req, status); } @@ -420,7 +468,7 @@ void nvme_complete_rq(struct request *req) nvme_failover_req(req); return; case AUTHENTICATE: -#ifdef CONFIG_NVME_AUTH +#ifdef CONFIG_NVME_HOST_AUTH queue_work(nvme_wq, &ctrl->dhchap_auth_work); nvme_retry_req(req); #else @@ -435,7 +483,7 @@ void nvme_complete_batch_req(struct request *req) { trace_nvme_complete_rq(req); nvme_cleanup_cmd(req); - nvme_end_req_zoned(req); + __nvme_end_req(req); } EXPORT_SYMBOL_GPL(nvme_complete_batch_req); @@ -499,7 +547,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, spin_lock_irqsave(&ctrl->lock, flags); - old_state = ctrl->state; + old_state = nvme_ctrl_state(ctrl); switch (new_state) { case NVME_CTRL_LIVE: switch (old_state) { @@ -567,7 +615,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, } if (changed) { - ctrl->state = new_state; + WRITE_ONCE(ctrl->state, new_state); wake_up_all(&ctrl->state_wq); } @@ -575,11 +623,11 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, if (!changed) return false; - if (ctrl->state == NVME_CTRL_LIVE) { + if (new_state == NVME_CTRL_LIVE) { if (old_state == NVME_CTRL_CONNECTING) nvme_stop_failfast_work(ctrl); nvme_kick_requeue_lists(ctrl); - } else if (ctrl->state == NVME_CTRL_CONNECTING && + } else if (new_state == NVME_CTRL_CONNECTING && old_state == NVME_CTRL_RESETTING) { nvme_start_failfast_work(ctrl); } @@ -588,27 +636,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); /* - * Returns true for sink states that can't ever 
transition back to live. - */ -static bool nvme_state_terminal(struct nvme_ctrl *ctrl) -{ - switch (ctrl->state) { - case NVME_CTRL_NEW: - case NVME_CTRL_LIVE: - case NVME_CTRL_RESETTING: - case NVME_CTRL_CONNECTING: - return false; - case NVME_CTRL_DELETING: - case NVME_CTRL_DELETING_NOIO: - case NVME_CTRL_DEAD: - return true; - default: - WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state); - return true; - } -} - -/* * Waits for the controller state to be resetting, or returns false if it is * not possible to ever transition to that state. */ @@ -617,7 +644,7 @@ bool nvme_wait_reset(struct nvme_ctrl *ctrl) wait_event(ctrl->state_wq, nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) || nvme_state_terminal(ctrl)); - return ctrl->state == NVME_CTRL_RESETTING; + return nvme_ctrl_state(ctrl) == NVME_CTRL_RESETTING; } EXPORT_SYMBOL_GPL(nvme_wait_reset); @@ -653,7 +680,7 @@ static void nvme_free_ns(struct kref *kref) kfree(ns); } -static inline bool nvme_get_ns(struct nvme_ns *ns) +bool nvme_get_ns(struct nvme_ns *ns) { return kref_get_unless_zero(&ns->kref); } @@ -675,10 +702,21 @@ static inline void nvme_clear_nvme_request(struct request *req) /* initialize a passthrough request */ void nvme_init_request(struct request *req, struct nvme_command *cmd) { - if (req->q->queuedata) + struct nvme_request *nr = nvme_req(req); + bool logging_enabled; + + if (req->q->queuedata) { + struct nvme_ns *ns = req->q->disk->private_data; + + logging_enabled = ns->head->passthru_err_log_enabled; req->timeout = NVME_IO_TIMEOUT; - else /* no queuedata implies admin queue */ + } else { /* no queuedata implies admin queue */ + logging_enabled = nr->ctrl->passthru_err_log_enabled; req->timeout = NVME_ADMIN_TIMEOUT; + } + + if (!logging_enabled) + req->rq_flags |= RQF_QUIET; /* passthru commands should let the driver set the SGL flags */ cmd->common.flags &= ~NVME_CMD_SGL_ALL; @@ -687,8 +725,7 @@ void nvme_init_request(struct request *req, struct nvme_command *cmd) if (req->mq_hctx->type == HCTX_TYPE_POLL) req->cmd_flags |= REQ_POLLED; nvme_clear_nvme_request(req); - req->rq_flags |= RQF_QUIET; - memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd)); + memcpy(nr->cmd, cmd, sizeof(*cmd)); } EXPORT_SYMBOL_GPL(nvme_init_request); @@ -704,9 +741,11 @@ EXPORT_SYMBOL_GPL(nvme_init_request); blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl, struct request *rq) { - if (ctrl->state != NVME_CTRL_DELETING_NOIO && - ctrl->state != NVME_CTRL_DELETING && - ctrl->state != NVME_CTRL_DEAD && + enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); + + if (state != NVME_CTRL_DELETING_NOIO && + state != NVME_CTRL_DELETING && + state != NVME_CTRL_DEAD && !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) && !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) return BLK_STS_RESOURCE; @@ -715,7 +754,7 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl, EXPORT_SYMBOL_GPL(nvme_fail_nonready_command); bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, - bool queue_live) + bool queue_live, enum nvme_ctrl_state state) { struct nvme_request *req = nvme_req(rq); @@ -736,7 +775,7 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, * command, which is require to set the queue live in the * appropinquate states. 
*/ - switch (ctrl->state) { + switch (state) { case NVME_CTRL_CONNECTING: if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) && (req->cmd->fabrics.fctype == nvme_fabrics_type_connect || @@ -791,8 +830,8 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, } if (queue_max_discard_segments(req->q) == 1) { - u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req)); - u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9); + u64 slba = nvme_sect_to_lba(ns->head, blk_rq_pos(req)); + u32 nlb = blk_rq_sectors(req) >> (ns->head->lba_shift - 9); range[0].cattr = cpu_to_le32(0); range[0].nlb = cpu_to_le32(nlb); @@ -800,8 +839,9 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, n = 1; } else { __rq_for_each_bio(bio, req) { - u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector); - u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; + u64 slba = nvme_sect_to_lba(ns->head, + bio->bi_iter.bi_sector); + u32 nlb = bio->bi_iter.bi_size >> ns->head->lba_shift; if (n < segments) { range[n].cattr = cpu_to_le32(0); @@ -839,7 +879,7 @@ static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd, u64 ref48; /* both rw and write zeroes share the same reftag format */ - switch (ns->guard_type) { + switch (ns->head->guard_type) { case NVME_NVM_NS_16B_GUARD: cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req)); break; @@ -867,17 +907,18 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes; cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id); cmnd->write_zeroes.slba = - cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); + cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req))); cmnd->write_zeroes.length = - cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); + cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1); - if (!(req->cmd_flags & REQ_NOUNMAP) && (ns->features & NVME_NS_DEAC)) + if (!(req->cmd_flags & REQ_NOUNMAP) && + (ns->head->features & NVME_NS_DEAC)) cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC); - if (nvme_ns_has_pi(ns)) { + if (nvme_ns_has_pi(ns->head)) { cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT); - switch (ns->pi_type) { + switch (ns->head->pi_type) { case NVME_NS_DPS_PI_TYPE1: case NVME_NS_DPS_PI_TYPE2: nvme_set_ref_tag(ns, cmnd, req); @@ -888,6 +929,36 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, return BLK_STS_OK; } +/* + * NVMe does not support a dedicated command to issue an atomic write. A write + * which does adhere to the device atomic limits will silently be executed + * non-atomically. The request issuer should ensure that the write is within + * the queue atomic writes limits, but just validate this in case it is not. 
+ */ +static bool nvme_valid_atomic_write(struct request *req) +{ + struct request_queue *q = req->q; + u32 boundary_bytes = queue_atomic_write_boundary_bytes(q); + + if (blk_rq_bytes(req) > queue_atomic_write_unit_max_bytes(q)) + return false; + + if (boundary_bytes) { + u64 mask = boundary_bytes - 1, imask = ~mask; + u64 start = blk_rq_pos(req) << SECTOR_SHIFT; + u64 end = start + blk_rq_bytes(req) - 1; + + /* If greater then must be crossing a boundary */ + if (blk_rq_bytes(req) > boundary_bytes) + return false; + + if ((start & imask) != (end & imask)) + return false; + } + + return true; +} + static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, struct request *req, struct nvme_command *cmnd, enum nvme_opcode op) @@ -903,19 +974,24 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, if (req->cmd_flags & REQ_RAHEAD) dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; + if (req->cmd_flags & REQ_ATOMIC && !nvme_valid_atomic_write(req)) + return BLK_STS_INVAL; + cmnd->rw.opcode = op; cmnd->rw.flags = 0; cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id); cmnd->rw.cdw2 = 0; cmnd->rw.cdw3 = 0; cmnd->rw.metadata = 0; - cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); - cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); + cmnd->rw.slba = + cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req))); + cmnd->rw.length = + cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1); cmnd->rw.reftag = 0; - cmnd->rw.apptag = 0; - cmnd->rw.appmask = 0; + cmnd->rw.lbat = 0; + cmnd->rw.lbatm = 0; - if (ns->ms) { + if (ns->head->ms) { /* * If formated with metadata, the block layer always provides a * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else @@ -923,12 +999,12 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, * namespace capacity to zero to prevent any I/O. 
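/*
 * Illustrative sketch, not part of the patch: the boundary test in
 * nvme_valid_atomic_write() above reduces to checking that the first and
 * last byte of the write fall inside the same boundary-sized window. A
 * minimal user-space model of that arithmetic; the function name, window
 * size and offsets below are invented for illustration, and the boundary
 * is assumed to be a power of two as the block layer guarantees.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool crosses_boundary(uint64_t start, uint64_t len, uint64_t boundary)
{
	uint64_t mask = boundary - 1;	/* boundary is a power of two */
	uint64_t end = start + len - 1;

	/* anything larger than the window always crosses it */
	if (len > boundary)
		return true;
	/* first and last byte in different windows => crossing */
	return (start & ~mask) != (end & ~mask);
}

int main(void)
{
	/* 4 KiB write at offset 14 KiB with a 16 KiB boundary crosses it */
	printf("%d\n", crosses_boundary(14 * 1024, 4 * 1024, 16 * 1024));
	/* the same write at offset 8 KiB stays inside one window */
	printf("%d\n", crosses_boundary(8 * 1024, 4 * 1024, 16 * 1024));
	return 0;
}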
*/ if (!blk_integrity_rq(req)) { - if (WARN_ON_ONCE(!nvme_ns_has_pi(ns))) + if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head))) return BLK_STS_NOTSUPP; control |= NVME_RW_PRINFO_PRACT; } - switch (ns->pi_type) { + switch (ns->head->pi_type) { case NVME_NS_DPS_PI_TYPE3: control |= NVME_RW_PRINFO_PRCHK_GUARD; break; @@ -957,6 +1033,7 @@ void nvme_cleanup_cmd(struct request *req) clear_bit_unlock(0, &ctrl->discard_page_busy); else kfree(bvec_virt(&req->special_vec)); + req->rq_flags &= ~RQF_SPECIAL_PAYLOAD; } } EXPORT_SYMBOL_GPL(nvme_cleanup_cmd); @@ -1041,20 +1118,27 @@ EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU); */ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, union nvme_result *result, void *buffer, unsigned bufflen, - int qid, int at_head, blk_mq_req_flags_t flags) + int qid, nvme_submit_flags_t flags) { struct request *req; int ret; + blk_mq_req_flags_t blk_flags = 0; + if (flags & NVME_SUBMIT_NOWAIT) + blk_flags |= BLK_MQ_REQ_NOWAIT; + if (flags & NVME_SUBMIT_RESERVED) + blk_flags |= BLK_MQ_REQ_RESERVED; if (qid == NVME_QID_ANY) - req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags); + req = blk_mq_alloc_request(q, nvme_req_op(cmd), blk_flags); else - req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags, + req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), blk_flags, qid - 1); if (IS_ERR(req)) return PTR_ERR(req); nvme_init_request(req, cmd); + if (flags & NVME_SUBMIT_RETRY) + req->cmd_flags &= ~REQ_FAILFAST_DRIVER; if (buffer && bufflen) { ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL); @@ -1062,7 +1146,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, goto out; } - ret = nvme_execute_rq(req, at_head); + ret = nvme_execute_rq(req, flags & NVME_SUBMIT_AT_HEAD); if (result && ret >= 0) *result = nvme_req(req)->result; out: @@ -1075,7 +1159,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, void *buffer, unsigned bufflen) { return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, - NVME_QID_ANY, 0, 0); + NVME_QID_ANY, 0); } EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd); @@ -1098,6 +1182,10 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) effects &= ~NVME_CMD_EFFECTS_CSE_MASK; } else { effects = le32_to_cpu(ctrl->effects->acs[opcode]); + + /* Ignore execution restrictions if any relaxation bits are set */ + if (effects & NVME_CMD_EFFECTS_CSER_MASK) + effects &= ~NVME_CMD_EFFECTS_CSE_MASK; } return effects; @@ -1171,7 +1259,7 @@ EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU); /* * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1: - * + * * The host should send Keep Alive commands at half of the Keep Alive Timeout * accounting for transport roundtrip times [..]. 
*/ @@ -1192,8 +1280,16 @@ static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl) static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl) { - queue_delayed_work(nvme_wq, &ctrl->ka_work, - nvme_keep_alive_work_period(ctrl)); + unsigned long now = jiffies; + unsigned long delay = nvme_keep_alive_work_period(ctrl); + unsigned long ka_next_check_tm = ctrl->ka_last_check_time + delay; + + if (time_after(now, ka_next_check_tm)) + delay = 0; + else + delay = ka_next_check_tm - now; + + queue_delayed_work(nvme_wq, &ctrl->ka_work, delay); } static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, @@ -1331,8 +1427,10 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) error = nvme_submit_sync_cmd(dev->admin_q, &c, *id, sizeof(struct nvme_id_ctrl)); - if (error) + if (error) { kfree(*id); + *id = NULL; + } return error; } @@ -1442,7 +1540,7 @@ free_data: return status; } -static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, +int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, struct nvme_id_ns **id) { struct nvme_command c = { }; @@ -1461,6 +1559,7 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, if (error) { dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error); kfree(*id); + *id = NULL; } return error; } @@ -1479,7 +1578,8 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl, if (id->ncap == 0) { /* namespace not allocated or attached */ info->is_removed = true; - return -ENODEV; + ret = -ENODEV; + goto error; } info->anagrpid = id->anagrpid; @@ -1497,8 +1597,10 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl, !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) memcpy(ids->nguid, id->nguid, sizeof(ids->nguid)); } + +error: kfree(id); - return 0; + return ret; } static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl, @@ -1539,7 +1641,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid, c.features.dword11 = cpu_to_le32(dword11); ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, - buffer, buflen, NVME_QID_ANY, 0, 0); + buffer, buflen, NVME_QID_ANY, 0); if (ret >= 0 && result) *result = le32_to_cpu(res.u32); return ret; @@ -1657,92 +1759,87 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) return 0; } -#ifdef CONFIG_BLK_DEV_INTEGRITY -static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, - u32 max_integrity_segments) +static bool nvme_init_integrity(struct nvme_ns_head *head, + struct queue_limits *lim, struct nvme_ns_info *info) { - struct blk_integrity integrity = { }; + struct blk_integrity *bi = &lim->integrity; + + memset(bi, 0, sizeof(*bi)); + + if (!head->ms) + return true; + + /* + * PI can always be supported as we can ask the controller to simply + * insert/strip it, which is not possible for other kinds of metadata. 
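/*
 * Illustrative sketch, not part of the patch: the re-queue delay computed
 * in nvme_queue_keep_alive_work() above is the time remaining until
 * ka_last_check_time + period, clamped to zero once that instant has
 * passed. A user-space model with a signed, wrap-safe comparison standing
 * in for the kernel's time_after(); the tick values are made up.
 */
#include <stdio.h>

static unsigned long ka_requeue_delay(unsigned long now,
				      unsigned long last_check,
				      unsigned long period)
{
	unsigned long next_check = last_check + period;

	/* signed difference emulates the wrap-safe time_after() check */
	if ((long)(now - next_check) > 0)
		return 0;		/* overdue: fire immediately */
	return next_check - now;	/* otherwise wait out the remainder */
}

int main(void)
{
	printf("%lu\n", ka_requeue_delay(1000, 900, 250));	/* 150 ticks left */
	printf("%lu\n", ka_requeue_delay(1200, 900, 250));	/* overdue: 0 */
	return 0;
}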
+ */ + if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) || + !(head->features & NVME_NS_METADATA_SUPPORTED)) + return nvme_ns_has_pi(head); - switch (ns->pi_type) { + switch (head->pi_type) { case NVME_NS_DPS_PI_TYPE3: - switch (ns->guard_type) { + switch (head->guard_type) { case NVME_NVM_NS_16B_GUARD: - integrity.profile = &t10_pi_type3_crc; - integrity.tag_size = sizeof(u16) + sizeof(u32); - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + bi->csum_type = BLK_INTEGRITY_CSUM_CRC; + bi->tag_size = sizeof(u16) + sizeof(u32); + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; break; case NVME_NVM_NS_64B_GUARD: - integrity.profile = &ext_pi_type3_crc64; - integrity.tag_size = sizeof(u16) + 6; - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + bi->csum_type = BLK_INTEGRITY_CSUM_CRC64; + bi->tag_size = sizeof(u16) + 6; + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; break; default: - integrity.profile = NULL; break; } break; case NVME_NS_DPS_PI_TYPE1: case NVME_NS_DPS_PI_TYPE2: - switch (ns->guard_type) { + switch (head->guard_type) { case NVME_NVM_NS_16B_GUARD: - integrity.profile = &t10_pi_type1_crc; - integrity.tag_size = sizeof(u16); - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + bi->csum_type = BLK_INTEGRITY_CSUM_CRC; + bi->tag_size = sizeof(u16); + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE | + BLK_INTEGRITY_REF_TAG; break; case NVME_NVM_NS_64B_GUARD: - integrity.profile = &ext_pi_type1_crc64; - integrity.tag_size = sizeof(u16); - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + bi->csum_type = BLK_INTEGRITY_CSUM_CRC64; + bi->tag_size = sizeof(u16); + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE | + BLK_INTEGRITY_REF_TAG; break; default: - integrity.profile = NULL; break; } break; default: - integrity.profile = NULL; break; } - integrity.tuple_size = ns->ms; - blk_integrity_register(disk, &integrity); - blk_queue_max_integrity_segments(disk->queue, max_integrity_segments); -} -#else -static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, - u32 max_integrity_segments) -{ + bi->tuple_size = head->ms; + bi->pi_offset = info->pi_offset; + return true; } -#endif /* CONFIG_BLK_DEV_INTEGRITY */ -static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) +static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim) { struct nvme_ctrl *ctrl = ns->ctrl; - struct request_queue *queue = disk->queue; - u32 size = queue_logical_block_size(queue); - - if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX)) - ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl); - if (ctrl->max_discard_sectors == 0) { - blk_queue_max_discard_sectors(queue, 0); - return; - } - - BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) < - NVME_DSM_MAX_RANGES); - - queue->limits.discard_granularity = size; - - /* If discard is already enabled, don't reset queue limits */ - if (queue->limits.max_discard_sectors) - return; + if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX)) + lim->max_hw_discard_sectors = + nvme_lba_to_sect(ns->head, ctrl->dmrsl); + else if (ctrl->oncs & NVME_CTRL_ONCS_DSM) + lim->max_hw_discard_sectors = UINT_MAX; + else + lim->max_hw_discard_sectors = 0; - blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors); - blk_queue_max_discard_segments(queue, ctrl->max_discard_segments); + lim->discard_granularity = lim->logical_block_size; - if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) - blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); + if (ctrl->dmrl) + lim->max_discard_segments = ctrl->dmrl; + else + 
lim->max_discard_segments = NVME_DSM_MAX_RANGES; } static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) @@ -1753,76 +1850,80 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) a->csi == b->csi; } -static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id) +static int nvme_identify_ns_nvm(struct nvme_ctrl *ctrl, unsigned int nsid, + struct nvme_id_ns_nvm **nvmp) { - bool first = id->dps & NVME_NS_DPS_PI_FIRST; - unsigned lbaf = nvme_lbaf_index(id->flbas); - struct nvme_ctrl *ctrl = ns->ctrl; - struct nvme_command c = { }; + struct nvme_command c = { + .identify.opcode = nvme_admin_identify, + .identify.nsid = cpu_to_le32(nsid), + .identify.cns = NVME_ID_CNS_CS_NS, + .identify.csi = NVME_CSI_NVM, + }; struct nvme_id_ns_nvm *nvm; - int ret = 0; - u32 elbaf; - - ns->pi_size = 0; - ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); - if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) { - ns->pi_size = sizeof(struct t10_pi_tuple); - ns->guard_type = NVME_NVM_NS_16B_GUARD; - goto set_pi; - } + int ret; nvm = kzalloc(sizeof(*nvm), GFP_KERNEL); if (!nvm) return -ENOMEM; - c.identify.opcode = nvme_admin_identify; - c.identify.nsid = cpu_to_le32(ns->head->ns_id); - c.identify.cns = NVME_ID_CNS_CS_NS; - c.identify.csi = NVME_CSI_NVM; - - ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm)); + ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, nvm, sizeof(*nvm)); if (ret) - goto free_data; + kfree(nvm); + else + *nvmp = nvm; + return ret; +} - elbaf = le32_to_cpu(nvm->elbaf[lbaf]); +static void nvme_configure_pi_elbas(struct nvme_ns_head *head, + struct nvme_id_ns *id, struct nvme_id_ns_nvm *nvm) +{ + u32 elbaf = le32_to_cpu(nvm->elbaf[nvme_lbaf_index(id->flbas)]); + u8 guard_type; /* no support for storage tag formats right now */ if (nvme_elbaf_sts(elbaf)) - goto free_data; + return; - ns->guard_type = nvme_elbaf_guard_type(elbaf); - switch (ns->guard_type) { + guard_type = nvme_elbaf_guard_type(elbaf); + if ((nvm->pic & NVME_ID_NS_NVM_QPIFS) && + guard_type == NVME_NVM_NS_QTYPE_GUARD) + guard_type = nvme_elbaf_qualified_guard_type(elbaf); + + head->guard_type = guard_type; + switch (head->guard_type) { case NVME_NVM_NS_64B_GUARD: - ns->pi_size = sizeof(struct crc64_pi_tuple); + head->pi_size = sizeof(struct crc64_pi_tuple); break; case NVME_NVM_NS_16B_GUARD: - ns->pi_size = sizeof(struct t10_pi_tuple); + head->pi_size = sizeof(struct t10_pi_tuple); break; default: break; } - -free_data: - kfree(nvm); -set_pi: - if (ns->pi_size && (first || ns->ms == ns->pi_size)) - ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; - else - ns->pi_type = 0; - - return ret; } -static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) +static void nvme_configure_metadata(struct nvme_ctrl *ctrl, + struct nvme_ns_head *head, struct nvme_id_ns *id, + struct nvme_id_ns_nvm *nvm, struct nvme_ns_info *info) { - struct nvme_ctrl *ctrl = ns->ctrl; - - if (nvme_init_ms(ns, id)) + head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); + head->pi_type = 0; + head->pi_size = 0; + head->ms = le16_to_cpu(id->lbaf[nvme_lbaf_index(id->flbas)].ms); + if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) return; - ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); - if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) - return; + if (nvm && (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) { + nvme_configure_pi_elbas(head, id, nvm); + } else { + head->pi_size = sizeof(struct t10_pi_tuple); + head->guard_type = 
NVME_NVM_NS_16B_GUARD; + } + + if (head->pi_size && head->ms >= head->pi_size) + head->pi_type = id->dps & NVME_NS_DPS_PI_MASK; + if (!(id->dps & NVME_NS_DPS_PI_FIRST)) + info->pi_offset = head->ms - head->pi_size; if (ctrl->ops->flags & NVME_F_FABRICS) { /* @@ -1833,7 +1934,7 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) return; - ns->features |= NVME_NS_EXT_LBAS; + head->features |= NVME_NS_EXT_LBAS; /* * The current fabrics transport drivers support namespace @@ -1844,8 +1945,8 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) * Note, this check will need to be modified if any drivers * gain the ability to use other metadata formats. */ - if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns)) - ns->features |= NVME_NS_METADATA_SUPPORTED; + if (ctrl->max_integrity_segments && nvme_ns_has_pi(head)) + head->features |= NVME_NS_METADATA_SUPPORTED; } else { /* * For PCIe controllers, we can't easily remap the separate @@ -1854,48 +1955,64 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) * We allow extended LBAs for the passthrough interface, though. */ if (id->flbas & NVME_NS_FLBAS_META_EXT) - ns->features |= NVME_NS_EXT_LBAS; + head->features |= NVME_NS_EXT_LBAS; else - ns->features |= NVME_NS_METADATA_SUPPORTED; + head->features |= NVME_NS_METADATA_SUPPORTED; } } -static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, - struct request_queue *q) -{ - bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT; - if (ctrl->max_hw_sectors) { - u32 max_segments = - (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1; +static void nvme_update_atomic_write_disk_info(struct nvme_ns *ns, + struct nvme_id_ns *id, struct queue_limits *lim, + u32 bs, u32 atomic_bs) +{ + unsigned int boundary = 0; - max_segments = min_not_zero(max_segments, ctrl->max_segments); - blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); - blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); + if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) { + if (le16_to_cpu(id->nabspf)) + boundary = (le16_to_cpu(id->nabspf) + 1) * bs; } - blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1); - blk_queue_dma_alignment(q, 3); - blk_queue_write_cache(q, vwc, vwc); + lim->atomic_write_hw_max = atomic_bs; + lim->atomic_write_hw_boundary = boundary; + lim->atomic_write_hw_unit_min = bs; + lim->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs); +} + +static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl) +{ + return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1; +} + +static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl, + struct queue_limits *lim) +{ + lim->max_hw_sectors = ctrl->max_hw_sectors; + lim->max_segments = min_t(u32, USHRT_MAX, + min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments)); + lim->max_integrity_segments = ctrl->max_integrity_segments; + lim->virt_boundary_mask = NVME_CTRL_PAGE_SIZE - 1; + lim->max_segment_size = UINT_MAX; + lim->dma_alignment = 3; } -static void nvme_update_disk_info(struct gendisk *disk, - struct nvme_ns *ns, struct nvme_id_ns *id) +static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id, + struct queue_limits *lim) { - sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze)); - u32 bs = 1U << ns->lba_shift; + struct nvme_ns_head *head = ns->head; + u32 bs = 1U << head->lba_shift; u32 atomic_bs, phys_bs, io_opt = 0; + bool valid = true; /* * The block layer can't support LBA 
sizes larger than the page size - * yet, so catch this early and don't allow block I/O. + * or smaller than a sector size yet, so catch this early and don't + * allow block I/O. */ - if (ns->lba_shift > PAGE_SHIFT) { - capacity = 0; + if (head->lba_shift > PAGE_SHIFT || head->lba_shift < SECTOR_SHIFT) { bs = (1 << 9); + valid = false; } - blk_integrity_unregister(disk); - atomic_bs = phys_bs = bs; if (id->nabo == 0) { /* @@ -1907,45 +2024,32 @@ static void nvme_update_disk_info(struct gendisk *disk, atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; else atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; + + nvme_update_atomic_write_disk_info(ns, id, lim, bs, atomic_bs); } if (id->nsfeat & NVME_NS_FEAT_IO_OPT) { /* NPWG = Namespace Preferred Write Granularity */ phys_bs = bs * (1 + le16_to_cpu(id->npwg)); /* NOWS = Namespace Optimal Write Size */ - io_opt = bs * (1 + le16_to_cpu(id->nows)); + if (id->nows) + io_opt = bs * (1 + le16_to_cpu(id->nows)); } - blk_queue_logical_block_size(disk->queue, bs); /* * Linux filesystems assume writing a single physical block is * an atomic operation. Hence limit the physical block size to the * value of the Atomic Write Unit Power Fail parameter. */ - blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs)); - blk_queue_io_min(disk->queue, phys_bs); - blk_queue_io_opt(disk->queue, io_opt); - - /* - * Register a metadata profile for PI, or the plain non-integrity NVMe - * metadata masquerading as Type 0 if supported, otherwise reject block - * I/O to namespaces with metadata except when the namespace supports - * PI, as it can strip/insert in that case. - */ - if (ns->ms) { - if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && - (ns->features & NVME_NS_METADATA_SUPPORTED)) - nvme_init_integrity(disk, ns, - ns->ctrl->max_integrity_segments); - else if (!nvme_ns_has_pi(ns)) - capacity = 0; - } - - set_capacity_and_notify(disk, capacity); - - nvme_config_discard(disk, ns); - blk_queue_max_write_zeroes_sectors(disk->queue, - ns->ctrl->max_zeroes_sectors); + lim->logical_block_size = bs; + lim->physical_block_size = min(phys_bs, atomic_bs); + lim->io_min = phys_bs; + lim->io_opt = io_opt; + if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) + lim->max_write_zeroes_sectors = UINT_MAX; + else + lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors; + return valid; } static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info) @@ -1959,7 +2063,8 @@ static inline bool nvme_first_scan(struct gendisk *disk) return !disk_live(disk); } -static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id) +static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id, + struct queue_limits *lim) { struct nvme_ctrl *ctrl = ns->ctrl; u32 iob; @@ -1968,7 +2073,7 @@ static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id) is_power_of_2(ctrl->max_hw_sectors)) iob = ctrl->max_hw_sectors; else - iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob)); + iob = nvme_lba_to_sect(ns->head, le16_to_cpu(id->noiob)); if (!iob) return; @@ -1987,38 +2092,36 @@ static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id) return; } - blk_queue_chunk_sectors(ns->queue, iob); + lim->chunk_sectors = iob; } static int nvme_update_ns_info_generic(struct nvme_ns *ns, struct nvme_ns_info *info) { + struct queue_limits lim; + int ret; + blk_mq_freeze_queue(ns->disk->queue); - nvme_set_queue_limits(ns->ctrl, ns->queue); + lim = queue_limits_start_update(ns->disk->queue); + nvme_set_ctrl_limits(ns->ctrl, &lim); + 
ret = queue_limits_commit_update(ns->disk->queue, &lim); set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); blk_mq_unfreeze_queue(ns->disk->queue); - if (nvme_ns_head_multipath(ns->head)) { - blk_mq_freeze_queue(ns->head->disk->queue); - set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); - nvme_mpath_revalidate_paths(ns); - blk_stack_limits(&ns->head->disk->queue->limits, - &ns->queue->limits, 0); - ns->head->disk->flags |= GENHD_FL_HIDDEN; - blk_mq_unfreeze_queue(ns->head->disk->queue); - } - /* Hide the block-interface for these devices */ - ns->disk->flags |= GENHD_FL_HIDDEN; - set_bit(NVME_NS_READY, &ns->flags); - - return 0; + if (!ret) + ret = -ENODEV; + return ret; } static int nvme_update_ns_info_block(struct nvme_ns *ns, struct nvme_ns_info *info) { + struct queue_limits lim; + struct nvme_id_ns_nvm *nvm = NULL; + struct nvme_zone_info zi = {}; struct nvme_id_ns *id; + sector_t capacity; unsigned lbaf; int ret; @@ -2026,23 +2129,65 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, if (ret) return ret; - blk_mq_freeze_queue(ns->disk->queue); + if (id->ncap == 0) { + /* namespace not allocated or attached */ + info->is_removed = true; + ret = -ENXIO; + goto out; + } lbaf = nvme_lbaf_index(id->flbas); - ns->lba_shift = id->lbaf[lbaf].ds; - nvme_set_queue_limits(ns->ctrl, ns->queue); - nvme_configure_metadata(ns, id); - nvme_set_chunk_sectors(ns, id); - nvme_update_disk_info(ns->disk, ns, id); + if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) { + ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm); + if (ret < 0) + goto out; + } - if (ns->head->ids.csi == NVME_CSI_ZNS) { - ret = nvme_update_zone_info(ns, lbaf); - if (ret) { - blk_mq_unfreeze_queue(ns->disk->queue); + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && + ns->head->ids.csi == NVME_CSI_ZNS) { + ret = nvme_query_zone_info(ns, lbaf, &zi); + if (ret < 0) goto out; - } } + blk_mq_freeze_queue(ns->disk->queue); + ns->head->lba_shift = id->lbaf[lbaf].ds; + ns->head->nuse = le64_to_cpu(id->nuse); + capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze)); + + lim = queue_limits_start_update(ns->disk->queue); + nvme_set_ctrl_limits(ns->ctrl, &lim); + nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info); + nvme_set_chunk_sectors(ns, id, &lim); + if (!nvme_update_disk_info(ns, id, &lim)) + capacity = 0; + nvme_config_discard(ns, &lim); + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && + ns->head->ids.csi == NVME_CSI_ZNS) + nvme_update_zone_info(ns, &lim, &zi); + + if (ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT) + lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA; + else + lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA); + + /* + * Register a metadata profile for PI, or the plain non-integrity NVMe + * metadata masquerading as Type 0 if supported, otherwise reject block + * I/O to namespaces with metadata except when the namespace supports + * PI, as it can strip/insert in that case. + */ + if (!nvme_init_integrity(ns->head, &lim, info)) + capacity = 0; + + ret = queue_limits_commit_update(ns->disk->queue, &lim); + if (ret) { + blk_mq_unfreeze_queue(ns->disk->queue); + goto out; + } + + set_capacity_and_notify(ns->disk, capacity); + /* * Only set the DEAC bit if the device guarantees that reads from * deallocated data return zeroes. While the DEAC bit does not @@ -2050,62 +2195,129 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, * do not return zeroes. 
*/ if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) - ns->features |= NVME_NS_DEAC; + ns->head->features |= NVME_NS_DEAC; set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); set_bit(NVME_NS_READY, &ns->flags); blk_mq_unfreeze_queue(ns->disk->queue); if (blk_queue_is_zoned(ns->queue)) { - ret = nvme_revalidate_zones(ns); + ret = blk_revalidate_disk_zones(ns->disk); if (ret && !nvme_first_scan(ns->disk)) goto out; } - if (nvme_ns_head_multipath(ns->head)) { - blk_mq_freeze_queue(ns->head->disk->queue); - nvme_update_disk_info(ns->head->disk, ns, id); - set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); - nvme_mpath_revalidate_paths(ns); - blk_stack_limits(&ns->head->disk->queue->limits, - &ns->queue->limits, 0); - disk_update_readahead(ns->head->disk); - blk_mq_unfreeze_queue(ns->head->disk->queue); - } - ret = 0; out: - /* - * If probing fails due an unsupported feature, hide the block device, - * but still allow other access. - */ - if (ret == -ENODEV) { - ns->disk->flags |= GENHD_FL_HIDDEN; - set_bit(NVME_NS_READY, &ns->flags); - ret = 0; - } + kfree(nvm); kfree(id); return ret; } static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) { + bool unsupported = false; + int ret; + switch (info->ids.csi) { case NVME_CSI_ZNS: if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { dev_info(ns->ctrl->device, "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n", info->nsid); - return nvme_update_ns_info_generic(ns, info); + ret = nvme_update_ns_info_generic(ns, info); + break; } - return nvme_update_ns_info_block(ns, info); + ret = nvme_update_ns_info_block(ns, info); + break; case NVME_CSI_NVM: - return nvme_update_ns_info_block(ns, info); + ret = nvme_update_ns_info_block(ns, info); + break; default: dev_info(ns->ctrl->device, "block device for nsid %u not supported (csi %u)\n", info->nsid, info->ids.csi); - return nvme_update_ns_info_generic(ns, info); + ret = nvme_update_ns_info_generic(ns, info); + break; + } + + /* + * If probing fails due an unsupported feature, hide the block device, + * but still allow other access. + */ + if (ret == -ENODEV) { + ns->disk->flags |= GENHD_FL_HIDDEN; + set_bit(NVME_NS_READY, &ns->flags); + unsupported = true; + ret = 0; + } + + if (!ret && nvme_ns_head_multipath(ns->head)) { + struct queue_limits *ns_lim = &ns->disk->queue->limits; + struct queue_limits lim; + + blk_mq_freeze_queue(ns->head->disk->queue); + /* + * queue_limits mixes values that are the hardware limitations + * for bio splitting with what is the device configuration. + * + * For NVMe the device configuration can change after e.g. a + * Format command, and we really want to pick up the new format + * value here. But we must still stack the queue limits to the + * least common denominator for multipathing to split the bios + * properly. + * + * To work around this, we explicitly set the device + * configuration to those that we just queried, but only stack + * the splitting limits in to make sure we still obey possibly + * lower limitations of other controllers. 
+ */ + lim = queue_limits_start_update(ns->head->disk->queue); + lim.logical_block_size = ns_lim->logical_block_size; + lim.physical_block_size = ns_lim->physical_block_size; + lim.io_min = ns_lim->io_min; + lim.io_opt = ns_lim->io_opt; + queue_limits_stack_bdev(&lim, ns->disk->part0, 0, + ns->head->disk->disk_name); + if (unsupported) + ns->head->disk->flags |= GENHD_FL_HIDDEN; + else + nvme_init_integrity(ns->head, &lim, info); + ret = queue_limits_commit_update(ns->head->disk->queue, &lim); + + set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk)); + set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); + nvme_mpath_revalidate_paths(ns); + + blk_mq_unfreeze_queue(ns->head->disk->queue); } + + return ret; +} + +int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16], + enum blk_unique_id type) +{ + struct nvme_ns_ids *ids = &ns->head->ids; + + if (type != BLK_UID_EUI64) + return -EINVAL; + + if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) { + memcpy(id, &ids->nguid, sizeof(ids->nguid)); + return sizeof(ids->nguid); + } + if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) { + memcpy(id, &ids->eui64, sizeof(ids->eui64)); + return sizeof(ids->eui64); + } + + return -EINVAL; +} + +static int nvme_get_unique_id(struct gendisk *disk, u8 id[16], + enum blk_unique_id type) +{ + return nvme_ns_get_unique_id(disk->private_data, id, type); } #ifdef CONFIG_BLK_SED_OPAL @@ -2124,7 +2336,7 @@ static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t l cmd.common.cdw11 = cpu_to_le32(len); return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, - NVME_QID_ANY, 1, 0); + NVME_QID_ANY, NVME_SUBMIT_AT_HEAD); } static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) @@ -2163,6 +2375,7 @@ const struct block_device_operations nvme_bdev_ops = { .open = nvme_open, .release = nvme_release, .getgeo = nvme_getgeo, + .get_unique_id = nvme_get_unique_id, .report_zones = nvme_report_zones, .pr_ops = &nvme_pr_ops, }; @@ -2245,25 +2458,8 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl) else ctrl->ctrl_config = NVME_CC_CSS_NVM; - if (ctrl->cap & NVME_CAP_CRMS_CRWMS) { - u32 crto; - - ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto); - if (ret) { - dev_err(ctrl->device, "Reading CRTO failed (%d)\n", - ret); - return ret; - } - - if (ctrl->cap & NVME_CAP_CRMS_CRIMS) { - ctrl->ctrl_config |= NVME_CC_CRIME; - timeout = NVME_CRTO_CRIMT(crto); - } else { - timeout = NVME_CRTO_CRWMT(crto); - } - } else { - timeout = NVME_CAP_TIMEOUT(ctrl->cap); - } + if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS) + ctrl->ctrl_config |= NVME_CC_CRIME; ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; @@ -2272,11 +2468,39 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl) if (ret) return ret; - /* Flush write to device (required if transport is PCI) */ - ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config); + /* CAP value may change after initial CC write */ + ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); if (ret) return ret; + timeout = NVME_CAP_TIMEOUT(ctrl->cap); + if (ctrl->cap & NVME_CAP_CRMS_CRWMS) { + u32 crto, ready_timeout; + + ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto); + if (ret) { + dev_err(ctrl->device, "Reading CRTO failed (%d)\n", + ret); + return ret; + } + + /* + * CRTO should always be greater or equal to CAP.TO, but some + * devices are known to get this wrong. Use the larger of the + * two values. 
+ */ + if (ctrl->ctrl_config & NVME_CC_CRIME) + ready_timeout = NVME_CRTO_CRIMT(crto); + else + ready_timeout = NVME_CRTO_CRWMT(crto); + + if (ready_timeout < timeout) + dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n", + crto, ctrl->cap); + else + timeout = ready_timeout; + } + ctrl->ctrl_config |= NVME_CC_ENABLE; ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); if (ret) @@ -2506,7 +2730,7 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val) if (ctrl->ps_max_latency_us != latency) { ctrl->ps_max_latency_us = latency; - if (ctrl->state == NVME_CTRL_LIVE) + if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE) nvme_configure_apst(ctrl); } } @@ -2764,7 +2988,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) subsys->awupf = le16_to_cpu(id->awupf); nvme_mpath_default_iopolicy(subsys); - subsys->dev.class = nvme_subsys_class; + subsys->dev.class = &nvme_subsys_class; subsys->dev.release = nvme_release_subsystem; subsys->dev.groups = nvme_subsys_attrs_groups; dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance); @@ -2874,14 +3098,6 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) struct nvme_id_ctrl_nvm *id; int ret; - if (ctrl->oncs & NVME_CTRL_ONCS_DSM) { - ctrl->max_discard_sectors = UINT_MAX; - ctrl->max_discard_segments = NVME_DSM_MAX_RANGES; - } else { - ctrl->max_discard_sectors = 0; - ctrl->max_discard_segments = 0; - } - /* * Even though NVMe spec explicitly states that MDTS is not applicable * to the write-zeroes, we are cautious and limit the size to the @@ -2911,8 +3127,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) if (ret) goto free_data; - if (id->dmrl) - ctrl->max_discard_segments = id->dmrl; + ctrl->dmrl = id->dmrl; ctrl->dmrsl = le32_to_cpu(id->dmrsl); if (id->wzsl) ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl); @@ -2980,8 +3195,50 @@ static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) return 0; } +static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) +{ + /* + * In fabrics we need to verify the cntlid matches the + * admin connect + */ + if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { + dev_err(ctrl->device, + "Mismatching cntlid: Connect %u vs Identify %u, rejecting\n", + ctrl->cntlid, le16_to_cpu(id->cntlid)); + return -EINVAL; + } + + if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) { + dev_err(ctrl->device, + "keep-alive support is mandatory for fabrics\n"); + return -EINVAL; + } + + if (!nvme_discovery_ctrl(ctrl) && ctrl->ioccsz < 4) { + dev_err(ctrl->device, + "I/O queue command capsule supported size %d < 4\n", + ctrl->ioccsz); + return -EINVAL; + } + + if (!nvme_discovery_ctrl(ctrl) && ctrl->iorcsz < 1) { + dev_err(ctrl->device, + "I/O queue response capsule supported size %d < 1\n", + ctrl->iorcsz); + return -EINVAL; + } + + if (!ctrl->maxcmd) { + dev_err(ctrl->device, "Maximum outstanding commands is 0\n"); + return -EINVAL; + } + + return 0; +} + static int nvme_init_identify(struct nvme_ctrl *ctrl) { + struct queue_limits lim; struct nvme_id_ctrl *id; u32 max_hw_sectors; bool prev_apst_enabled; @@ -3048,7 +3305,12 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl) ctrl->max_hw_sectors = min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); - nvme_set_queue_limits(ctrl, ctrl->admin_q); + lim = queue_limits_start_update(ctrl->admin_q); + nvme_set_ctrl_limits(ctrl, &lim); + ret = queue_limits_commit_update(ctrl->admin_q, &lim); + if (ret) + goto out_free; + ctrl->sgls = 
le32_to_cpu(id->sgls); ctrl->kas = le16_to_cpu(id->kas); ctrl->max_namespaces = le32_to_cpu(id->mnan); @@ -3066,7 +3328,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl) if (ctrl->shutdown_timeout != shutdown_timeout) dev_info(ctrl->device, - "Shutdown timeout set to %u seconds\n", + "D3 entry latency set to %u seconds\n", ctrl->shutdown_timeout); } else ctrl->shutdown_timeout = shutdown_timeout; @@ -3092,25 +3354,9 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl) ctrl->iorcsz = le32_to_cpu(id->iorcsz); ctrl->maxcmd = le16_to_cpu(id->maxcmd); - /* - * In fabrics we need to verify the cntlid matches the - * admin connect - */ - if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { - dev_err(ctrl->device, - "Mismatching cntlid: Connect %u vs Identify " - "%u, rejecting\n", - ctrl->cntlid, le16_to_cpu(id->cntlid)); - ret = -EINVAL; - goto out_free; - } - - if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) { - dev_err(ctrl->device, - "keep-alive support is mandatory for fabrics\n"); - ret = -EINVAL; + ret = nvme_check_ctrl_fabric_info(ctrl, id); + if (ret) goto out_free; - } } else { ctrl->hmpre = le32_to_cpu(id->hmpre); ctrl->hmmin = le32_to_cpu(id->hmmin); @@ -3183,6 +3429,8 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended) clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags); ctrl->identified = true; + nvme_start_keep_alive(ctrl); + return 0; } EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish); @@ -3192,7 +3440,7 @@ static int nvme_dev_open(struct inode *inode, struct file *file) struct nvme_ctrl *ctrl = container_of(inode->i_cdev, struct nvme_ctrl, cdev); - switch (ctrl->state) { + switch (nvme_ctrl_state(ctrl)) { case NVME_CTRL_LIVE: break; default: @@ -3294,7 +3542,7 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, if (minor < 0) return minor; cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor); - cdev_device->class = nvme_ns_chr_class; + cdev_device->class = &nvme_ns_chr_class; cdev_device->release = nvme_cdev_rel; device_initialize(cdev_device); cdev_init(cdev, fops); @@ -3367,6 +3615,8 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, head->ns_id = info->nsid; head->ids = info->ids; head->shared = info->is_shared; + ratelimit_state_init(&head->rs_nuse, 5 * HZ, 1); + ratelimit_set_flags(&head->rs_nuse, RATELIMIT_MSG_ON_RELEASE); kref_init(&head->ref); if (head->ids.csi) { @@ -3502,7 +3752,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info) "Found shared namespace %d, but multipathing not supported.\n", info->nsid); dev_warn_once(ctrl->device, - "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n."); + "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n"); } } @@ -3521,9 +3771,10 @@ out_unlock: struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) { struct nvme_ns *ns, *ret = NULL; + int srcu_idx; - down_read(&ctrl->namespaces_rwsem); - list_for_each_entry(ns, &ctrl->namespaces, list) { + srcu_idx = srcu_read_lock(&ctrl->srcu); + list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { if (ns->head->ns_id == nsid) { if (!nvme_get_ns(ns)) continue; @@ -3533,7 +3784,7 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) if (ns->head->ns_id > nsid) break; } - up_read(&ctrl->namespaces_rwsem); + srcu_read_unlock(&ctrl->srcu, srcu_idx); return ret; } EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU); @@ -3547,7 +3798,7 @@ static void 
nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) { if (tmp->head->ns_id < ns->head->ns_id) { - list_add(&ns->list, &tmp->list); + list_add_rcu(&ns->list, &tmp->list); return; } } @@ -3556,6 +3807,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) { + struct queue_limits lim = { }; struct nvme_ns *ns; struct gendisk *disk; int node = ctrl->numa_node; @@ -3564,7 +3816,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) if (!ns) return; - disk = blk_mq_alloc_disk(ctrl->tagset, ns); + if (ctrl->opts && ctrl->opts->data_digest) + lim.features |= BLK_FEAT_STABLE_WRITES; + if (ctrl->ops->supports_pci_p2pdma && + ctrl->ops->supports_pci_p2pdma(ctrl)) + lim.features |= BLK_FEAT_PCI_P2PDMA; + + disk = blk_mq_alloc_disk(ctrl->tagset, &lim, ns); if (IS_ERR(disk)) goto out_free_ns; disk->fops = &nvme_bdev_ops; @@ -3572,15 +3830,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) ns->disk = disk; ns->queue = disk->queue; - - if (ctrl->opts && ctrl->opts->data_digest) - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue); - - blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); - if (ctrl->ops->supports_pci_p2pdma && - ctrl->ops->supports_pci_p2pdma(ctrl)) - blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue); - ns->ctrl = ctrl; kref_init(&ns->kref); @@ -3613,12 +3862,21 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) if (nvme_update_ns_info(ns, info)) goto out_unlink_ns; - down_write(&ctrl->namespaces_rwsem); + mutex_lock(&ctrl->namespaces_lock); + /* + * Ensure that no namespaces are added to the ctrl list after the queues + * are frozen, thereby avoiding a deadlock between scan and reset. + */ + if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) { + mutex_unlock(&ctrl->namespaces_lock); + goto out_unlink_ns; + } nvme_ns_add_to_ctrl_list(ns); - up_write(&ctrl->namespaces_rwsem); + mutex_unlock(&ctrl->namespaces_lock); + synchronize_srcu(&ctrl->srcu); nvme_get_ctrl(ctrl); - if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups)) + if (device_add_disk(ctrl->device, ns->disk, nvme_ns_attr_groups)) goto out_cleanup_ns_from_list; if (!nvme_ns_head_multipath(ns->head)) @@ -3627,13 +3885,21 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) nvme_mpath_add_disk(ns, info->anagrpid); nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name); + /* + * Set ns->disk->device->driver_data to ns so we can access + * ns->head->passthru_err_log_enabled in + * nvme_io_passthru_err_log_enabled_[store | show](). 
+ */ + dev_set_drvdata(disk_to_dev(ns->disk), ns); + return; out_cleanup_ns_from_list: nvme_put_ctrl(ctrl); - down_write(&ctrl->namespaces_rwsem); - list_del_init(&ns->list); - up_write(&ctrl->namespaces_rwsem); + mutex_lock(&ctrl->namespaces_lock); + list_del_rcu(&ns->list); + mutex_unlock(&ctrl->namespaces_lock); + synchronize_srcu(&ctrl->srcu); out_unlink_ns: mutex_lock(&ctrl->subsys->lock); list_del_rcu(&ns->siblings); @@ -3683,9 +3949,10 @@ static void nvme_ns_remove(struct nvme_ns *ns) nvme_cdev_del(&ns->cdev, &ns->cdev_device); del_gendisk(ns->disk); - down_write(&ns->ctrl->namespaces_rwsem); - list_del_init(&ns->list); - up_write(&ns->ctrl->namespaces_rwsem); + mutex_lock(&ns->ctrl->namespaces_lock); + list_del_rcu(&ns->list); + mutex_unlock(&ns->ctrl->namespaces_lock); + synchronize_srcu(&ns->ctrl->srcu); if (last_path) nvme_mpath_shutdown_disk(ns->head); @@ -3704,7 +3971,7 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info) { - int ret = NVME_SC_INVALID_NS | NVME_SC_DNR; + int ret = NVME_SC_INVALID_NS | NVME_STATUS_DNR; if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) { dev_err(ns->ctrl->device, @@ -3720,7 +3987,7 @@ out: * * TODO: we should probably schedule a delayed retry here. */ - if (ret > 0 && (ret & NVME_SC_DNR)) + if (ret > 0 && (ret & NVME_STATUS_DNR)) nvme_ns_remove(ns); } @@ -3769,22 +4036,53 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid) } } +/** + * struct async_scan_info - keeps track of controller & NSIDs to scan + * @ctrl: Controller on which namespaces are being scanned + * @next_nsid: Index of next NSID to scan in ns_list + * @ns_list: Pointer to list of NSIDs to scan + * + * Note: There is a single async_scan_info structure shared by all instances + * of nvme_scan_ns_async() scanning a given controller, so the atomic + * operations on next_nsid are critical to ensure each instance scans a unique + * NSID. 
+ */ +struct async_scan_info { + struct nvme_ctrl *ctrl; + atomic_t next_nsid; + __le32 *ns_list; +}; + +static void nvme_scan_ns_async(void *data, async_cookie_t cookie) +{ + struct async_scan_info *scan_info = data; + int idx; + u32 nsid; + + idx = (u32)atomic_fetch_inc(&scan_info->next_nsid); + nsid = le32_to_cpu(scan_info->ns_list[idx]); + + nvme_scan_ns(scan_info->ctrl, nsid); +} + static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, unsigned nsid) { struct nvme_ns *ns, *next; LIST_HEAD(rm_list); - down_write(&ctrl->namespaces_rwsem); + mutex_lock(&ctrl->namespaces_lock); list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { - if (ns->head->ns_id > nsid) - list_move_tail(&ns->list, &rm_list); + if (ns->head->ns_id > nsid) { + list_del_rcu(&ns->list); + synchronize_srcu(&ctrl->srcu); + list_add_tail_rcu(&ns->list, &rm_list); + } } - up_write(&ctrl->namespaces_rwsem); + mutex_unlock(&ctrl->namespaces_lock); list_for_each_entry_safe(ns, next, &rm_list, list) nvme_ns_remove(ns); - } static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) @@ -3793,11 +4091,15 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) __le32 *ns_list; u32 prev = 0; int ret = 0, i; + ASYNC_DOMAIN(domain); + struct async_scan_info scan_info; ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); if (!ns_list) return -ENOMEM; + scan_info.ctrl = ctrl; + scan_info.ns_list = ns_list; for (;;) { struct nvme_command cmd = { .identify.opcode = nvme_admin_identify, @@ -3813,19 +4115,23 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) goto free; } + atomic_set(&scan_info.next_nsid, 0); for (i = 0; i < nr_entries; i++) { u32 nsid = le32_to_cpu(ns_list[i]); if (!nsid) /* end of the list? */ goto out; - nvme_scan_ns(ctrl, nsid); + async_schedule_domain(nvme_scan_ns_async, &scan_info, + &domain); while (++prev < nsid) nvme_ns_remove_by_nsid(ctrl, prev); } + async_synchronize_full_domain(&domain); } out: nvme_remove_invalid_namespaces(ctrl, prev); free: + async_synchronize_full_domain(&domain); kfree(ns_list); return ret; } @@ -3878,7 +4184,7 @@ static void nvme_scan_work(struct work_struct *work) int ret; /* No tagset on a live ctrl means IO queues could not created */ - if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset) + if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset) return; /* @@ -3910,7 +4216,7 @@ static void nvme_scan_work(struct work_struct *work) * they report) but don't actually support it. */ ret = nvme_scan_ns_list(ctrl); - if (ret > 0 && ret & NVME_SC_DNR) + if (ret > 0 && ret & NVME_STATUS_DNR) nvme_scan_ns_sequential(ctrl); } mutex_unlock(&ctrl->scan_lock); @@ -3948,15 +4254,16 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl) * removing the namespaces' disks; fail all the queues now to avoid * potentially having to clean up the failed sync later. 
*/ - if (ctrl->state == NVME_CTRL_DEAD) + if (nvme_ctrl_state(ctrl) == NVME_CTRL_DEAD) nvme_mark_namespaces_dead(ctrl); /* this is a no-op when called from the controller reset handler */ nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO); - down_write(&ctrl->namespaces_rwsem); - list_splice_init(&ctrl->namespaces, &ns_list); - up_write(&ctrl->namespaces_rwsem); + mutex_lock(&ctrl->namespaces_lock); + list_splice_init_rcu(&ctrl->namespaces, &ns_list, synchronize_rcu); + mutex_unlock(&ctrl->namespaces_lock); + synchronize_srcu(&ctrl->srcu); list_for_each_entry_safe(ns, next, &ns_list, list) nvme_ns_remove(ns); @@ -4030,7 +4337,7 @@ static void nvme_async_event_work(struct work_struct *work) * flushing ctrl async_event_work after changing the controller state * from LIVE and before freeing the admin queue. */ - if (ctrl->state == NVME_CTRL_LIVE) + if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE) ctrl->ops->submit_async_event(ctrl); } @@ -4051,14 +4358,30 @@ static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) { struct nvme_fw_slot_info_log *log; + u8 next_fw_slot, cur_fw_slot; log = kmalloc(sizeof(*log), GFP_KERNEL); if (!log) return; if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM, - log, sizeof(*log), 0)) + log, sizeof(*log), 0)) { dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); + goto out_free_log; + } + + cur_fw_slot = log->afi & 0x7; + next_fw_slot = (log->afi & 0x70) >> 4; + if (!cur_fw_slot || (next_fw_slot && (cur_fw_slot != next_fw_slot))) { + dev_info(ctrl->device, + "Firmware is activated after next Controller Level Reset\n"); + goto out_free_log; + } + + memcpy(ctrl->subsys->firmware_rev, &log->frs[cur_fw_slot - 1], + sizeof(ctrl->subsys->firmware_rev)); + +out_free_log: kfree(log); } @@ -4068,6 +4391,8 @@ static void nvme_fw_act_work(struct work_struct *work) struct nvme_ctrl, fw_act_work); unsigned long fw_act_timeout; + nvme_auth_stop(ctrl); + if (ctrl->mtfa) fw_act_timeout = jiffies + msecs_to_jiffies(ctrl->mtfa * 100); @@ -4123,7 +4448,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) * firmware activation. 
*/ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) { - nvme_auth_stop(ctrl); requeue = false; queue_work(nvme_wq, &ctrl->fw_act_work); } @@ -4146,7 +4470,8 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl) { - dev_warn(ctrl->device, "resetting controller due to AER\n"); + dev_warn(ctrl->device, + "resetting controller due to persistent internal error\n"); nvme_reset_ctrl(ctrl); } @@ -4193,13 +4518,15 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event); int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, const struct blk_mq_ops *ops, unsigned int cmd_size) { + struct queue_limits lim = {}; int ret; memset(set, 0, sizeof(*set)); set->ops = ops; set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; if (ctrl->ops->flags & NVME_F_FABRICS) - set->reserved_tags = NVMF_RESERVED_TAGS; + /* Reserved for fabric connect and keep alive */ + set->reserved_tags = 2; set->numa_node = ctrl->numa_node; set->flags = BLK_MQ_F_NO_SCHED; if (ctrl->ops->flags & NVME_F_BLOCKING) @@ -4212,14 +4539,14 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, if (ret) return ret; - ctrl->admin_q = blk_mq_init_queue(set); + ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL); if (IS_ERR(ctrl->admin_q)) { ret = PTR_ERR(ctrl->admin_q); goto out_free_tagset; } if (ctrl->ops->flags & NVME_F_FABRICS) { - ctrl->fabrics_q = blk_mq_init_queue(set); + ctrl->fabrics_q = blk_mq_alloc_queue(set, NULL, NULL); if (IS_ERR(ctrl->fabrics_q)) { ret = PTR_ERR(ctrl->fabrics_q); goto out_cleanup_admin_q; @@ -4268,12 +4595,13 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS) set->reserved_tags = NVME_AQ_DEPTH; else if (ctrl->ops->flags & NVME_F_FABRICS) - set->reserved_tags = NVMF_RESERVED_TAGS; + /* Reserved for fabric connect */ + set->reserved_tags = 1; set->numa_node = ctrl->numa_node; set->flags = BLK_MQ_F_SHOULD_MERGE; if (ctrl->ops->flags & NVME_F_BLOCKING) set->flags |= BLK_MQ_F_BLOCKING; - set->cmd_size = cmd_size, + set->cmd_size = cmd_size; set->driver_data = ctrl; set->nr_hw_queues = ctrl->queue_count - 1; set->timeout = NVME_IO_TIMEOUT; @@ -4283,13 +4611,15 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, return ret; if (ctrl->ops->flags & NVME_F_FABRICS) { - ctrl->connect_q = blk_mq_init_queue(set); + struct queue_limits lim = { + .features = BLK_FEAT_SKIP_TAGSET_QUIESCE, + }; + + ctrl->connect_q = blk_mq_alloc_queue(set, &lim, NULL); if (IS_ERR(ctrl->connect_q)) { ret = PTR_ERR(ctrl->connect_q); goto out_free_tag_set; } - blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, - ctrl->connect_q); } ctrl->tagset = set; @@ -4316,7 +4646,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl) { nvme_mpath_stop(ctrl); nvme_auth_stop(ctrl); - nvme_stop_keep_alive(ctrl); nvme_stop_failfast_work(ctrl); flush_work(&ctrl->async_event_work); cancel_work_sync(&ctrl->fw_act_work); @@ -4327,8 +4656,6 @@ EXPORT_SYMBOL_GPL(nvme_stop_ctrl); void nvme_start_ctrl(struct nvme_ctrl *ctrl) { - nvme_start_keep_alive(ctrl); - nvme_enable_aen(ctrl); /* @@ -4354,6 +4681,7 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl); void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) { + nvme_stop_keep_alive(ctrl); nvme_hwmon_exit(ctrl); nvme_fault_inject_fini(&ctrl->fault_inject); dev_pm_qos_hide_latency_tolerance(ctrl->device); @@ -4383,9 +4711,9 @@ static void nvme_free_ctrl(struct device *dev) if (!subsys || ctrl->instance != subsys->instance) 
ida_free(&nvme_instance_ida, ctrl->instance); - nvme_free_cels(ctrl); nvme_mpath_uninit(ctrl); + cleanup_srcu_struct(&ctrl->srcu); nvme_auth_stop(ctrl); nvme_auth_free(ctrl); __free_page(ctrl->discard_page); @@ -4408,19 +4736,28 @@ static void nvme_free_ctrl(struct device *dev) * Initialize a NVMe controller structures. This needs to be called during * earliest initialization so that we have the initialized structured around * during probing. + * + * On success, the caller must use the nvme_put_ctrl() to release this when + * needed, which also invokes the ops->free_ctrl() callback. */ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, const struct nvme_ctrl_ops *ops, unsigned long quirks) { int ret; - ctrl->state = NVME_CTRL_NEW; + WRITE_ONCE(ctrl->state, NVME_CTRL_NEW); + ctrl->passthru_err_log_enabled = false; clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); spin_lock_init(&ctrl->lock); + mutex_init(&ctrl->namespaces_lock); + + ret = init_srcu_struct(&ctrl->srcu); + if (ret) + return ret; + mutex_init(&ctrl->scan_lock); INIT_LIST_HEAD(&ctrl->namespaces); xa_init(&ctrl->cels); - init_rwsem(&ctrl->namespaces_rwsem); ctrl->dev = dev; ctrl->ops = ops; ctrl->quirks = quirks; @@ -4435,6 +4772,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work); memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; + ctrl->ka_last_check_time = jiffies; BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) > PAGE_SIZE); @@ -4449,11 +4787,17 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, goto out; ctrl->instance = ret; + ret = nvme_auth_init_ctrl(ctrl); + if (ret) + goto out_release_instance; + + nvme_mpath_init_ctrl(ctrl); + device_initialize(&ctrl->ctrl_device); ctrl->device = &ctrl->ctrl_device; ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt), ctrl->instance); - ctrl->device->class = nvme_class; + ctrl->device->class = &nvme_class; ctrl->device->parent = ctrl->dev; if (ops->dev_attr_groups) ctrl->device->groups = ops->dev_attr_groups; @@ -4461,16 +4805,36 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, ctrl->device->groups = nvme_dev_attr_groups; ctrl->device->release = nvme_free_ctrl; dev_set_drvdata(ctrl->device, ctrl); + + return ret; + +out_release_instance: + ida_free(&nvme_instance_ida, ctrl->instance); +out: + if (ctrl->discard_page) + __free_page(ctrl->discard_page); + cleanup_srcu_struct(&ctrl->srcu); + return ret; +} +EXPORT_SYMBOL_GPL(nvme_init_ctrl); + +/* + * On success, returns with an elevated controller reference and caller must + * use nvme_uninit_ctrl() to properly free resources associated with the ctrl. + */ +int nvme_add_ctrl(struct nvme_ctrl *ctrl) +{ + int ret; + ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); if (ret) - goto out_release_instance; + return ret; - nvme_get_ctrl(ctrl); cdev_init(&ctrl->cdev, &nvme_dev_fops); - ctrl->cdev.owner = ops->module; + ctrl->cdev.owner = ctrl->ops->module; ret = cdev_device_add(&ctrl->cdev, ctrl->device); if (ret) - goto out_free_name; + return ret; /* * Initialize latency tolerance controls. 
The sysfs files won't @@ -4481,62 +4845,50 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, min(default_ps_max_latency_us, (unsigned long)S32_MAX)); nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device)); - nvme_mpath_init_ctrl(ctrl); - ret = nvme_auth_init_ctrl(ctrl); - if (ret) - goto out_free_cdev; + nvme_get_ctrl(ctrl); return 0; -out_free_cdev: - nvme_fault_inject_fini(&ctrl->fault_inject); - dev_pm_qos_hide_latency_tolerance(ctrl->device); - cdev_device_del(&ctrl->cdev, ctrl->device); -out_free_name: - nvme_put_ctrl(ctrl); - kfree_const(ctrl->device->kobj.name); -out_release_instance: - ida_free(&nvme_instance_ida, ctrl->instance); -out: - if (ctrl->discard_page) - __free_page(ctrl->discard_page); - return ret; } -EXPORT_SYMBOL_GPL(nvme_init_ctrl); +EXPORT_SYMBOL_GPL(nvme_add_ctrl); /* let I/O to all namespaces fail in preparation for surprise removal */ void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; + int srcu_idx; - down_read(&ctrl->namespaces_rwsem); - list_for_each_entry(ns, &ctrl->namespaces, list) + srcu_idx = srcu_read_lock(&ctrl->srcu); + list_for_each_entry_rcu(ns, &ctrl->namespaces, list) blk_mark_disk_dead(ns->disk); - up_read(&ctrl->namespaces_rwsem); + srcu_read_unlock(&ctrl->srcu, srcu_idx); } EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead); void nvme_unfreeze(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; + int srcu_idx; - down_read(&ctrl->namespaces_rwsem); - list_for_each_entry(ns, &ctrl->namespaces, list) + srcu_idx = srcu_read_lock(&ctrl->srcu); + list_for_each_entry_rcu(ns, &ctrl->namespaces, list) blk_mq_unfreeze_queue(ns->queue); - up_read(&ctrl->namespaces_rwsem); + srcu_read_unlock(&ctrl->srcu, srcu_idx); + clear_bit(NVME_CTRL_FROZEN, &ctrl->flags); } EXPORT_SYMBOL_GPL(nvme_unfreeze); int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) { struct nvme_ns *ns; + int srcu_idx; - down_read(&ctrl->namespaces_rwsem); - list_for_each_entry(ns, &ctrl->namespaces, list) { + srcu_idx = srcu_read_lock(&ctrl->srcu); + list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); if (timeout <= 0) break; } - up_read(&ctrl->namespaces_rwsem); + srcu_read_unlock(&ctrl->srcu, srcu_idx); return timeout; } EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); @@ -4544,22 +4896,25 @@ EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); void nvme_wait_freeze(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; + int srcu_idx; - down_read(&ctrl->namespaces_rwsem); - list_for_each_entry(ns, &ctrl->namespaces, list) + srcu_idx = srcu_read_lock(&ctrl->srcu); + list_for_each_entry_rcu(ns, &ctrl->namespaces, list) blk_mq_freeze_queue_wait(ns->queue); - up_read(&ctrl->namespaces_rwsem); + srcu_read_unlock(&ctrl->srcu, srcu_idx); } EXPORT_SYMBOL_GPL(nvme_wait_freeze); void nvme_start_freeze(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; + int srcu_idx; - down_read(&ctrl->namespaces_rwsem); - list_for_each_entry(ns, &ctrl->namespaces, list) + set_bit(NVME_CTRL_FROZEN, &ctrl->flags); + srcu_idx = srcu_read_lock(&ctrl->srcu); + list_for_each_entry_rcu(ns, &ctrl->namespaces, list) blk_freeze_queue_start(ns->queue); - up_read(&ctrl->namespaces_rwsem); + srcu_read_unlock(&ctrl->srcu, srcu_idx); } EXPORT_SYMBOL_GPL(nvme_start_freeze); @@ -4602,11 +4957,12 @@ EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue); void nvme_sync_io_queues(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; + int srcu_idx; - down_read(&ctrl->namespaces_rwsem); - list_for_each_entry(ns, &ctrl->namespaces, list) + srcu_idx = 
srcu_read_lock(&ctrl->srcu); + list_for_each_entry_rcu(ns, &ctrl->namespaces, list) blk_sync_queue(ns->queue); - up_read(&ctrl->namespaces_rwsem); + srcu_read_unlock(&ctrl->srcu, srcu_idx); } EXPORT_SYMBOL_GPL(nvme_sync_io_queues); @@ -4684,29 +5040,22 @@ static int __init nvme_core_init(void) if (result < 0) goto destroy_delete_wq; - nvme_class = class_create("nvme"); - if (IS_ERR(nvme_class)) { - result = PTR_ERR(nvme_class); + result = class_register(&nvme_class); + if (result) goto unregister_chrdev; - } - nvme_class->dev_uevent = nvme_class_uevent; - nvme_subsys_class = class_create("nvme-subsystem"); - if (IS_ERR(nvme_subsys_class)) { - result = PTR_ERR(nvme_subsys_class); + result = class_register(&nvme_subsys_class); + if (result) goto destroy_class; - } result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS, "nvme-generic"); if (result < 0) goto destroy_subsys_class; - nvme_ns_chr_class = class_create("nvme-generic"); - if (IS_ERR(nvme_ns_chr_class)) { - result = PTR_ERR(nvme_ns_chr_class); + result = class_register(&nvme_ns_chr_class); + if (result) goto unregister_generic_ns; - } result = nvme_init_auth(); if (result) @@ -4714,13 +5063,13 @@ static int __init nvme_core_init(void) return 0; destroy_ns_chr: - class_destroy(nvme_ns_chr_class); + class_unregister(&nvme_ns_chr_class); unregister_generic_ns: unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); destroy_subsys_class: - class_destroy(nvme_subsys_class); + class_unregister(&nvme_subsys_class); destroy_class: - class_destroy(nvme_class); + class_unregister(&nvme_class); unregister_chrdev: unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); destroy_delete_wq: @@ -4736,9 +5085,9 @@ out: static void __exit nvme_core_exit(void) { nvme_exit_auth(); - class_destroy(nvme_ns_chr_class); - class_destroy(nvme_subsys_class); - class_destroy(nvme_class); + class_unregister(&nvme_ns_chr_class); + class_unregister(&nvme_subsys_class); + class_unregister(&nvme_class); unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); destroy_workqueue(nvme_delete_wq); @@ -4750,5 +5099,6 @@ static void __exit nvme_core_exit(void) MODULE_LICENSE("GPL"); MODULE_VERSION("1.0"); +MODULE_DESCRIPTION("NVMe host core framework"); module_init(nvme_core_init); module_exit(nvme_core_exit); |
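
A few hedged sketches of the patterns this diff introduces follow; all my_* identifiers are illustrative stand-ins, not part of the driver. First, the identify path above now funnels the fabrics-only sanity checks through nvme_check_ctrl_fabric_info(ctrl, id), whose body is not part of this hunk. Reconstructed from the lines the hunk deletes, the helper plausibly looks like the sketch below; treat it as an approximation of the real function, which may carry further checks.

/*
 * Hedged reconstruction of nvme_check_ctrl_fabric_info(), assembled from the
 * checks that the hunk above removes from nvme_init_identify().  Written as
 * it would sit inside core.c; the real helper may differ in detail.
 */
static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	/*
	 * In fabrics we need to verify the cntlid matches the
	 * admin connect.
	 */
	if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
		dev_err(ctrl->device,
			"Mismatching cntlid: Connect %u vs Identify %u, rejecting\n",
			ctrl->cntlid, le16_to_cpu(id->cntlid));
		return -EINVAL;
	}

	if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
		dev_err(ctrl->device,
			"keep-alive support is mandatory for fabrics\n");
		return -EINVAL;
	}

	return 0;
}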
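
nvme_alloc_ns() no longer toggles QUEUE_FLAG_STABLE_WRITES or QUEUE_FLAG_PCI_P2PDMA after creating the disk; the features are declared in a struct queue_limits handed to blk_mq_alloc_disk(). A minimal sketch of that allocation-time pattern, with hypothetical my_* names standing in for a driver's own tag set and private data:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/*
 * Sketch of the "limits at allocation" pattern used by nvme_alloc_ns() above.
 * Feature bits that used to be set on the live queue are collected in a
 * queue_limits structure before the gendisk exists.  my_tag_set, my_data and
 * the two bool parameters are hypothetical.
 */
static struct gendisk *my_alloc_disk(struct blk_mq_tag_set *my_tag_set,
		void *my_data, bool stable_writes, bool p2pdma)
{
	struct queue_limits lim = { };

	if (stable_writes)
		lim.features |= BLK_FEAT_STABLE_WRITES;
	if (p2pdma)
		lim.features |= BLK_FEAT_PCI_P2PDMA;

	/* Returns ERR_PTR() on failure, exactly like the call site above. */
	return blk_mq_alloc_disk(my_tag_set, &lim, my_data);
}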
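
The new nvme_scan_ns_async()/nvme_scan_ns_list() pair fans namespace scanning out over an async domain: every worker claims the next index with atomic_fetch_inc(), so each NSID is scanned exactly once even though all workers share one scan_info, and async_synchronize_full_domain() is the barrier before the ns_list buffer is reused or freed. A condensed, hedged sketch of that dispatch pattern (my_* names and process_one_id() are hypothetical):

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical per-ID work; stands in for nvme_scan_ns() in this sketch. */
static void process_one_id(void *ctx, u32 id)
{
	pr_debug("scanning id %u\n", id);
}

/* Shared by all async workers, mirroring struct async_scan_info above. */
struct my_scan_info {
	atomic_t next_idx;
	__le32 *id_list;
	void *ctx;
};

static void my_scan_one_async(void *data, async_cookie_t cookie)
{
	struct my_scan_info *info = data;
	u32 idx = (u32)atomic_fetch_inc(&info->next_idx);

	process_one_id(info->ctx, le32_to_cpu(info->id_list[idx]));
}

static void my_scan_all(struct my_scan_info *info, unsigned int nr)
{
	ASYNC_DOMAIN(domain);
	unsigned int i;

	atomic_set(&info->next_idx, 0);
	for (i = 0; i < nr; i++)
		async_schedule_domain(my_scan_one_async, info, &domain);

	/* Wait for every outstanding worker before the caller reuses id_list. */
	async_synchronize_full_domain(&domain);
}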
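
nvme_get_fw_slot_info() above now decodes the Active Firmware Info (AFI) byte of the Firmware Slot Information log page: bits 2:0 carry the currently active slot and bits 6:4 the slot that becomes active at the next Controller Level Reset (0 meaning none pending), and the firmware revision string is only copied when no activation is outstanding. The decode boils down to:

#include <linux/types.h>

/*
 * Helper equivalent to the AFI test in nvme_get_fw_slot_info() above:
 * returns true when the log's revision strings should not yet be trusted,
 * because no slot is valid or a different slot is staged for activation.
 */
static bool my_fw_activation_pending(u8 afi)
{
	u8 cur_slot  = afi & 0x7;		/* bits 2:0 */
	u8 next_slot = (afi & 0x70) >> 4;	/* bits 6:4 */

	return !cur_slot || (next_slot && cur_slot != next_slot);
}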
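
The tag-set helpers above replace blk_mq_init_queue() with blk_mq_alloc_queue(), which takes optional queue_limits and queuedata arguments, and they spell out why fabrics reserves tags (two on the admin set for connect plus keep-alive, one on the I/O set for connect). The connect queue's old QUEUE_FLAG_SKIP_TAGSET_QUIESCE flag likewise becomes a limits feature. A hedged sketch of that allocation, with my_set hypothetical:

#include <linux/blk-mq.h>

/*
 * Sketch of the blk_mq_alloc_queue() usage above: limits (and, if wanted,
 * queuedata) are supplied when the queue is created instead of being set on
 * the live queue afterwards.  my_set is a hypothetical, already-allocated
 * tag set.
 */
static struct request_queue *my_alloc_connect_queue(struct blk_mq_tag_set *my_set)
{
	struct queue_limits lim = {
		/* Replaces blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, q). */
		.features = BLK_FEAT_SKIP_TAGSET_QUIESCE,
	};

	/* Returns ERR_PTR() on failure; both extra arguments may be NULL. */
	return blk_mq_alloc_queue(my_set, &lim, NULL);
}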
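
Controller bring-up is now split in two: nvme_init_ctrl() initializes the structure and leaves the caller holding a reference to drop with nvme_put_ctrl(), while the new nvme_add_ctrl() names the device, registers the character device and takes the long-lived reference. The sketch below shows how a transport driver's probe path might chain the two calls; the my_* driver, its ops table and the trimmed error handling are all hypothetical.

#include <linux/slab.h>
#include "nvme.h"	/* driver-local header providing struct nvme_ctrl */

struct my_transport_ctrl {
	struct nvme_ctrl ctrl;
	/* transport-specific state would follow */
};

/* Hypothetical ops table; a real transport fills in the callbacks. */
static const struct nvme_ctrl_ops my_ctrl_ops;

static int my_transport_probe(struct device *dev, unsigned long quirks)
{
	struct my_transport_ctrl *my_ctrl;
	int ret;

	my_ctrl = kzalloc(sizeof(*my_ctrl), GFP_KERNEL);
	if (!my_ctrl)
		return -ENOMEM;

	ret = nvme_init_ctrl(&my_ctrl->ctrl, dev, &my_ctrl_ops, quirks);
	if (ret) {
		kfree(my_ctrl);
		return ret;
	}

	/* Transport-specific setup would normally happen here. */

	ret = nvme_add_ctrl(&my_ctrl->ctrl);
	if (ret)
		goto out_put;

	return 0;

out_put:
	/* Drops the reference from nvme_init_ctrl(); ops->free_ctrl() frees. */
	nvme_put_ctrl(&my_ctrl->ctrl);
	return ret;
}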
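
Almost every walker of ctrl->namespaces in this diff (mark-dead, freeze/unfreeze, wait-freeze, sync) switches from namespaces_rwsem to the same SRCU read-side pattern: readers take srcu_read_lock() and may sleep inside the walk, while writers serialize on ctrl->namespaces_lock, manipulate the list with the _rcu helpers and call synchronize_srcu() before an unlinked namespace can go away. A condensed sketch of that pairing with hypothetical my_* types:

#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/srcu.h>

struct my_item {
	struct list_head list;
};

struct my_ctrl {
	struct srcu_struct srcu;	/* init_srcu_struct()/cleanup_srcu_struct() */
	struct mutex lock;		/* serializes writers only */
	struct list_head items;
};

/* Reader: may block inside the walk (e.g. waiting for a queue freeze). */
static void my_for_each_item(struct my_ctrl *c, void (*fn)(struct my_item *))
{
	struct my_item *it;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&c->srcu);
	list_for_each_entry_rcu(it, &c->items, list)
		fn(it);
	srcu_read_unlock(&c->srcu, srcu_idx);
}

/* Writer: unlink under the mutex, then wait out existing readers. */
static void my_remove_item(struct my_ctrl *c, struct my_item *it)
{
	mutex_lock(&c->lock);
	list_del_rcu(&it->list);
	mutex_unlock(&c->lock);

	synchronize_srcu(&c->srcu);
	/* @it is no longer reachable by readers and may now be freed. */
}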
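
Finally, module init/exit above moves from class_create()/class_destroy() to registering statically defined, const struct class objects with class_register()/class_unregister(). A minimal hedged sketch of that pattern for a hypothetical module:

#include <linux/device/class.h>
#include <linux/module.h>

/* The class object lives in the module image; only its name is required. */
static const struct class my_example_class = {
	.name = "my-example",
};

static int __init my_example_init(void)
{
	return class_register(&my_example_class);
}

static void __exit my_example_exit(void)
{
	class_unregister(&my_example_class);
}

module_init(my_example_init);
module_exit(my_example_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Example: statically defined, const device class");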