Diffstat (limited to 'drivers/nvme/host/ioctl.c')
-rw-r--r--  drivers/nvme/host/ioctl.c  243
1 file changed, 63 insertions(+), 180 deletions(-)
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index d39f3219358b..b9b79ccfabf8 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -3,9 +3,10 @@
* Copyright (c) 2011-2014, Intel Corporation.
* Copyright (c) 2017-2021 Christoph Hellwig.
*/
+#include <linux/blk-integrity.h>
#include <linux/ptrace.h> /* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
-#include <linux/io_uring.h>
+#include <linux/io_uring/cmd.h>
#include "nvme.h"
enum {
@@ -18,15 +19,12 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
{
u32 effects;
- if (capable(CAP_SYS_ADMIN))
- return true;
-
/*
* Do not allow unprivileged passthrough on partitions, as that allows an
* escape from the containment of the partition.
*/
if (flags & NVME_IOCTL_PARTITION)
- return false;
+ goto admin;
/*
* Do not allow unprivileged processes to send vendor specific or fabrics
@@ -34,7 +32,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
*/
if (c->common.opcode >= nvme_cmd_vendor_start ||
c->common.opcode == nvme_fabrics_command)
- return false;
+ goto admin;
/*
* Do not allow unprivileged passthrough of admin commands except
@@ -53,7 +51,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
return true;
}
}
- return false;
+ goto admin;
}
/*
@@ -63,7 +61,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
*/
effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
if (!(effects & NVME_CMD_EFFECTS_CSUPP))
- return false;
+ goto admin;
/*
* Don't allow passthrough for commands that have intrusive (or unknown)
@@ -72,16 +70,20 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
NVME_CMD_EFFECTS_UUID_SEL |
NVME_CMD_EFFECTS_SCOPE_MASK))
- return false;
+ goto admin;
/*
* Only allow I/O commands that transfer data to the controller or that
* change the logical block contents if the file descriptor is open for
* writing.
*/
- if (nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC))
- return open_for_write;
+ if ((nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC)) &&
+ !open_for_write)
+ goto admin;
+
return true;
+admin:
+ return capable(CAP_SYS_ADMIN);
}
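
The hunk above rewrites nvme_cmd_allowed() so every rejection funnels through a single admin label instead of returning false at each site: unprivileged callers are vetted check by check, and only the fallthrough consults capable(CAP_SYS_ADMIN). A minimal sketch of the same deny-by-default pattern, with hypothetical check_opcode()/check_scope() predicates standing in for the real checks:

#include <linux/capability.h>
#include <linux/types.h>

struct cmd { u8 opcode; };

/* Placeholder predicates, not from this driver. */
static bool check_opcode(const struct cmd *c) { return c->opcode < 0x80; }
static bool check_scope(const struct cmd *c)  { return c->opcode != 0x7f; }

static bool cmd_allowed(const struct cmd *c)
{
	if (!check_opcode(c))
		goto admin;
	if (!check_scope(c))
		goto admin;
	/* safe for unprivileged callers */
	return true;
admin:
	/* every denied case falls back to one privilege check */
	return capable(CAP_SYS_ADMIN);
}
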
/*
@@ -96,55 +98,6 @@ static void __user *nvme_to_user_ptr(uintptr_t ptrval)
return (void __user *)ptrval;
}
-static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
- unsigned len, u32 seed)
-{
- struct bio_integrity_payload *bip;
- int ret = -ENOMEM;
- void *buf;
- struct bio *bio = req->bio;
-
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- goto out;
-
- ret = -EFAULT;
- if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len))
- goto out_free_meta;
-
- bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
- if (IS_ERR(bip)) {
- ret = PTR_ERR(bip);
- goto out_free_meta;
- }
-
- bip->bip_iter.bi_size = len;
- bip->bip_iter.bi_sector = seed;
- ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
- offset_in_page(buf));
- if (ret != len) {
- ret = -ENOMEM;
- goto out_free_meta;
- }
-
- req->cmd_flags |= REQ_INTEGRITY;
- return buf;
-out_free_meta:
- kfree(buf);
-out:
- return ERR_PTR(ret);
-}
-
-static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
- void *meta, unsigned len, int ret)
-{
- if (!ret && req_op(req) == REQ_OP_DRV_IN &&
- copy_to_user(ubuf, meta, len))
- ret = -EFAULT;
- kfree(meta);
- return ret;
-}
-
static struct request *nvme_alloc_user_request(struct request_queue *q,
struct nvme_command *cmd, blk_opf_t rq_flags,
blk_mq_req_flags_t blk_flags)
@@ -161,16 +114,19 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
static int nvme_map_user_request(struct request *req, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
- unsigned int flags)
+ u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags)
{
struct request_queue *q = req->q;
struct nvme_ns *ns = q->queuedata;
struct block_device *bdev = ns ? ns->disk->part0 : NULL;
+ bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk);
+ bool has_metadata = meta_buffer && meta_len;
struct bio *bio = NULL;
- void *meta = NULL;
int ret;
+ if (has_metadata && !supports_metadata)
+ return -EINVAL;
+
if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
struct iov_iter iter;
@@ -190,18 +146,16 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
if (ret)
goto out;
+
bio = req->bio;
if (bdev)
bio_set_dev(bio, bdev);
- if (bdev && meta_buffer && meta_len) {
- meta = nvme_add_user_metadata(req, meta_buffer, meta_len,
- meta_seed);
- if (IS_ERR(meta)) {
- ret = PTR_ERR(meta);
+ if (has_metadata) {
+ ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len,
+ meta_seed);
+ if (ret)
goto out_unmap;
- }
- *metap = meta;
}
return ret;
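
With nvme_add_user_metadata() gone, the mapping path above delegates bounce buffering and the copy in/out of user metadata to the block layer. A condensed sketch of the resulting flow in a hypothetical map_user_buffers() helper, assuming the unvectored case with no io_uring fixed buffers; variable names match the hunk and error unwinding is trimmed to the essentials:

#include <linux/blk-integrity.h>
#include <linux/blk-mq.h>

static int map_user_buffers(struct request *req, void __user *ubuf,
		unsigned bufflen, void __user *meta_buffer,
		unsigned meta_len, u32 meta_seed)
{
	int ret;

	/* map the user data buffer into req->bio */
	ret = blk_rq_map_user(req->q, req, NULL, ubuf, bufflen, GFP_KERNEL);
	if (ret)
		return ret;

	/* attach user metadata via the blk-integrity helper */
	if (meta_buffer && meta_len) {
		ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len,
						meta_seed);
		if (ret) {
			blk_rq_unmap_user(req->bio);
			return ret;
		}
	}
	return 0;
}
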
@@ -222,7 +176,6 @@ static int nvme_submit_user_cmd(struct request_queue *q,
struct nvme_ns *ns = q->queuedata;
struct nvme_ctrl *ctrl;
struct request *req;
- void *meta = NULL;
struct bio *bio;
u32 effects;
int ret;
@@ -234,7 +187,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
req->timeout = timeout;
if (ubuffer && bufflen) {
ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
- meta_len, meta_seed, &meta, NULL, flags);
+ meta_len, meta_seed, NULL, flags);
if (ret)
return ret;
}
@@ -246,9 +199,6 @@ static int nvme_submit_user_cmd(struct request_queue *q,
ret = nvme_execute_rq(req, false);
if (result)
*result = le64_to_cpu(nvme_req(req)->result.u64);
- if (meta)
- ret = nvme_finish_user_metadata(req, meta_buffer, meta,
- meta_len, ret);
if (bio)
blk_rq_unmap_user(bio);
blk_mq_free_request(req);
@@ -280,10 +230,10 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
return -EINVAL;
}
- length = (io.nblocks + 1) << ns->lba_shift;
+ length = (io.nblocks + 1) << ns->head->lba_shift;
if ((io.control & NVME_RW_PRINFO_PRACT) &&
- ns->ms == sizeof(struct t10_pi_tuple)) {
+ (ns->head->ms == ns->head->pi_size)) {
/*
* Protection information is stripped/inserted by the
* controller.
@@ -293,11 +243,11 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
meta_len = 0;
metadata = NULL;
} else {
- meta_len = (io.nblocks + 1) * ns->ms;
+ meta_len = (io.nblocks + 1) * ns->head->ms;
metadata = nvme_to_user_ptr(io.metadata);
}
- if (ns->features & NVME_NS_EXT_LBAS) {
+ if (ns->head->features & NVME_NS_EXT_LBAS) {
length += meta_len;
meta_len = 0;
} else if (meta_len) {
@@ -314,8 +264,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.control = cpu_to_le16(io.control);
c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
c.rw.reftag = cpu_to_le32(io.reftag);
- c.rw.apptag = cpu_to_le16(io.apptag);
- c.rw.appmask = cpu_to_le16(io.appmask);
+ c.rw.lbat = cpu_to_le16(io.apptag);
+ c.rw.lbatm = cpu_to_le16(io.appmask);
return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
meta_len, lower_32_bits(io.slba), NULL, 0, 0);
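
This is the path behind the legacy NVME_IOCTL_SUBMIT_IO interface; io.nblocks is zero-based, which is why the length math above uses io.nblocks + 1. A minimal userspace sketch of driving it, where the device path, starting LBA, and 4KiB block-size assumption are all placeholders:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	unsigned char buf[4096];		/* assumes a 4KiB logical block */
	struct nvme_user_io io;
	int fd = open("/dev/nvme0n1", O_RDONLY);	/* placeholder device */

	if (fd < 0)
		return 1;
	memset(&io, 0, sizeof(io));
	io.opcode = 0x02;			/* NVMe read */
	io.addr = (uint64_t)(uintptr_t)buf;
	io.slba = 0;				/* placeholder starting LBA */
	io.nblocks = 0;				/* zero-based: 0 means 1 block */
	if (ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io) < 0) {
		perror("NVME_IOCTL_SUBMIT_IO");
		return 1;
	}
	printf("read %u block(s)\n", io.nblocks + 1);
	return 0;
}
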
@@ -443,19 +393,10 @@ struct nvme_uring_data {
* Expect build errors if this grows larger than that.
*/
struct nvme_uring_cmd_pdu {
- union {
- struct bio *bio;
- struct request *req;
- };
- u32 meta_len;
- u32 nvme_status;
- union {
- struct {
- void *meta; /* kernel-resident buffer */
- void __user *meta_buffer;
- };
- u64 result;
- } u;
+ struct request *req;
+ struct bio *bio;
+ u64 result;
+ int status;
};
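
The "Expect build errors" comment above refers to the fixed pdu scratch space io_uring reserves inside struct io_uring_cmd; the slimmed-down pdu (two pointers, a u64 result, an int status) must keep fitting there. A hedged sketch of the kind of compile-time check that enforces this; the exact assertion in the tree may be placed or worded differently:

/*
 * On 64-bit the new layout is 8 (req) + 8 (bio) + 8 (result) +
 * 4 (status) = 28 bytes, padded to 32, so it still fits the pdu
 * area. Illustration only.
 */
static_assert(sizeof(struct nvme_uring_cmd_pdu) <=
	      sizeof_field(struct io_uring_cmd, pdu));
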
static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
@@ -464,31 +405,6 @@ static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}
-static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd,
- unsigned issue_flags)
-{
- struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
- struct request *req = pdu->req;
- int status;
- u64 result;
-
- if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
- status = -EINTR;
- else
- status = nvme_req(req)->status;
-
- result = le64_to_cpu(nvme_req(req)->result.u64);
-
- if (pdu->meta_len)
- status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
- pdu->u.meta, pdu->meta_len, status);
- if (req->bio)
- blk_rq_unmap_user(req->bio);
- blk_mq_free_request(req);
-
- io_uring_cmd_done(ioucmd, status, result, issue_flags);
-}
-
static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
unsigned issue_flags)
{
@@ -496,8 +412,7 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
if (pdu->bio)
blk_rq_unmap_user(pdu->bio);
-
- io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags);
+ io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
}
static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
@@ -506,20 +421,24 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
struct io_uring_cmd *ioucmd = req->end_io_data;
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
- req->bio = pdu->bio;
if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
- pdu->nvme_status = -EINTR;
+ pdu->status = -EINTR;
else
- pdu->nvme_status = nvme_req(req)->status;
- pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
+ pdu->status = nvme_req(req)->status;
+ pdu->result = le64_to_cpu(nvme_req(req)->result.u64);
/*
- * For iopoll, complete it directly.
+ * For iopoll, complete it directly. Note that using the uring_cmd
+ * helper for this is safe only because we check blk_rq_is_poll().
+ * Since that returns false if we're NOT on a polled queue, it's
+ * safe to use the polled completion helper.
+ *
* Otherwise, move the completion to task work.
*/
if (blk_rq_is_poll(req)) {
- WRITE_ONCE(ioucmd->cookie, NULL);
- nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
+ if (pdu->bio)
+ blk_rq_unmap_user(pdu->bio);
+ io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
} else {
io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
}
@@ -527,29 +446,6 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
return RQ_END_IO_FREE;
}
-static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
- blk_status_t err)
-{
- struct io_uring_cmd *ioucmd = req->end_io_data;
- struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
-
- req->bio = pdu->bio;
- pdu->req = req;
-
- /*
- * For iopoll, complete it directly.
- * Otherwise, move the completion to task work.
- */
- if (blk_rq_is_poll(req)) {
- WRITE_ONCE(ioucmd->cookie, NULL);
- nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
- } else {
- io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_meta_cb);
- }
-
- return RQ_END_IO_NONE;
-}
-
static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
{
@@ -561,7 +457,6 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct request *req;
blk_opf_t rq_flags = REQ_ALLOC_CACHE;
blk_mq_req_flags_t blk_flags = 0;
- void *meta = NULL;
int ret;
c.common.opcode = READ_ONCE(cmd->opcode);
@@ -609,27 +504,16 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
if (d.addr && d.data_len) {
ret = nvme_map_user_request(req, d.addr,
d.data_len, nvme_to_user_ptr(d.metadata),
- d.metadata_len, 0, &meta, ioucmd, vec);
+ d.metadata_len, 0, ioucmd, vec);
if (ret)
return ret;
}
- if (blk_rq_is_poll(req)) {
- ioucmd->flags |= IORING_URING_CMD_POLLED;
- WRITE_ONCE(ioucmd->cookie, req);
- }
-
/* to free bio on completion, as req->bio will be null at that time */
pdu->bio = req->bio;
- pdu->meta_len = d.metadata_len;
+ pdu->req = req;
req->end_io_data = ioucmd;
- if (pdu->meta_len) {
- pdu->u.meta = meta;
- pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata);
- req->end_io = nvme_uring_cmd_end_io_meta;
- } else {
- req->end_io = nvme_uring_cmd_end_io;
- }
+ req->end_io = nvme_uring_cmd_end_io;
blk_execute_rq_nowait(req, false);
return -EIOCBQUEUED;
}
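
Userspace reaches nvme_uring_cmd_io() via IORING_OP_URING_CMD on the namespace char device. A minimal liburing sketch, assuming a placeholder device /dev/ng0n1 with 4KiB logical blocks; NVMe passthrough requires big SQEs and CQEs:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <liburing.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	struct io_uring_params p = {
		.flags = IORING_SETUP_SQE128 | IORING_SETUP_CQE32,
	};
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct nvme_uring_cmd *cmd;
	unsigned char buf[4096];
	int fd = open("/dev/ng0n1", O_RDONLY);	/* placeholder char device */

	if (fd < 0 || io_uring_queue_init_params(8, &ring, &p) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fd;
	sqe->cmd_op = NVME_URING_CMD_IO;

	/* the NVMe command lives in the big-SQE payload area */
	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = 0x02;			/* NVMe read */
	cmd->nsid = 1;				/* placeholder namespace ID */
	cmd->addr = (uint64_t)(uintptr_t)buf;
	cmd->data_len = sizeof(buf);
	cmd->cdw12 = 0;				/* zero-based count: 1 block */

	io_uring_submit(&ring);
	if (io_uring_wait_cqe(&ring, &cqe) < 0)
		return 1;
	printf("cqe res %d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	return 0;
}
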
@@ -780,16 +664,12 @@ int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
struct io_comp_batch *iob,
unsigned int poll_flags)
{
- struct request *req;
- int ret = 0;
-
- if (!(ioucmd->flags & IORING_URING_CMD_POLLED))
- return 0;
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ struct request *req = pdu->req;
- req = READ_ONCE(ioucmd->cookie);
if (req && blk_rq_is_poll(req))
- ret = blk_rq_poll(req, iob, poll_flags);
- return ret;
+ return blk_rq_poll(req, iob, poll_flags);
+ return 0;
}
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
@@ -914,15 +794,15 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
bool open_for_write)
{
struct nvme_ns *ns;
- int ret;
+ int ret, srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
if (list_empty(&ctrl->namespaces)) {
ret = -ENOTTY;
goto out_unlock;
}
- ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
+ ns = list_first_or_null_rcu(&ctrl->namespaces, struct nvme_ns, list);
if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
dev_warn(ctrl->device,
"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
@@ -932,15 +812,18 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
dev_warn(ctrl->device,
"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
- kref_get(&ns->kref);
- up_read(&ctrl->namespaces_rwsem);
+ if (!nvme_get_ns(ns)) {
+ ret = -ENXIO;
+ goto out_unlock;
+ }
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write);
nvme_put_ns(ns);
return ret;
out_unlock:
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
return ret;
}
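
The final hunk replaces the namespaces_rwsem reader with SRCU plus a reference count: the list is walked under srcu_read_lock(), and the namespace must be pinned (nvme_get_ns() here fails once the kref has dropped to zero) before the read section is left. The same pattern in miniature, with hypothetical obj/get_obj/put_obj names:

#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/srcu.h>

struct obj {
	struct list_head list;
	struct kref kref;
};

/* Placeholder refcount helpers and work function. */
static void obj_release(struct kref *kref) { /* free the object */ }
static bool get_obj(struct obj *o) { return kref_get_unless_zero(&o->kref); }
static void put_obj(struct obj *o) { kref_put(&o->kref, obj_release); }
static int do_work(struct obj *o) { return 0; }

static int use_first_obj(struct srcu_struct *srcu, struct list_head *obj_list)
{
	struct obj *o;
	int idx, ret;

	idx = srcu_read_lock(srcu);
	o = list_first_or_null_rcu(obj_list, struct obj, list);
	if (!o || !get_obj(o)) {
		srcu_read_unlock(srcu, idx);
		return -ENXIO;
	}
	/* o is pinned; safe to leave the read section */
	srcu_read_unlock(srcu, idx);

	ret = do_work(o);
	put_obj(o);
	return ret;
}
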