-rw-r--r--  block/blk-flush.c                     11
-rw-r--r--  block/blk-map.c                      150
-rw-r--r--  block/blk-mq.c                       107
-rw-r--r--  drivers/md/dm-rq.c                     4
-rw-r--r--  drivers/nvme/host/core.c               6
-rw-r--r--  drivers/nvme/host/ioctl.c            227
-rw-r--r--  drivers/nvme/host/pci.c               12
-rw-r--r--  drivers/nvme/target/passthru.c         5
-rw-r--r--  drivers/scsi/scsi_error.c              4
-rw-r--r--  drivers/scsi/scsi_ioctl.c             22
-rw-r--r--  drivers/scsi/sg.c                     31
-rw-r--r--  drivers/scsi/st.c                      4
-rw-r--r--  drivers/target/target_core_pscsi.c     6
-rw-r--r--  drivers/ufs/core/ufshpb.c              8
-rw-r--r--  include/linux/blk-mq.h                12
-rw-r--r--  include/linux/io_uring.h              10
-rw-r--r--  include/uapi/linux/io_uring.h          9
-rw-r--r--  io_uring/uring_cmd.c                  29
18 files changed, 476 insertions(+), 181 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index d20a0c6b2c66..53202eff545e 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -205,7 +205,6 @@ static void blk_flush_complete_seq(struct request *rq,
* flush data request completion path. Restore @rq for
* normal completion and end it.
*/
- BUG_ON(!list_empty(&rq->queuelist));
list_del_init(&rq->flush.list);
blk_flush_restore_request(rq);
blk_mq_end_request(rq, error);
@@ -218,7 +217,8 @@ static void blk_flush_complete_seq(struct request *rq,
blk_kick_flush(q, fq, cmd_flags);
}
-static void flush_end_io(struct request *flush_rq, blk_status_t error)
+static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
+ blk_status_t error)
{
struct request_queue *q = flush_rq->q;
struct list_head *running;
@@ -232,7 +232,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
if (!req_ref_put_and_test(flush_rq)) {
fq->rq_status = error;
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
- return;
+ return RQ_END_IO_NONE;
}
blk_account_io_flush(flush_rq);
@@ -269,6 +269,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
}
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
+ return RQ_END_IO_NONE;
}
bool is_flush_rq(struct request *rq)
@@ -354,7 +355,8 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
blk_flush_queue_rq(flush_rq, false);
}
-static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
+static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
+ blk_status_t error)
{
struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
@@ -376,6 +378,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
blk_mq_sched_restart(hctx);
+ return RQ_END_IO_NONE;
}
/**
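For reference, a minimal sketch (not part of this series) of an end_io callback under the new contract introduced by enum rq_end_io_ret: returning RQ_END_IO_FREE asks the block layer to call blk_mq_free_request() on the caller's behalf, while RQ_END_IO_NONE keeps ownership with the submitter. my_ctx and its fields are illustrative assumptions only.

static enum rq_end_io_ret my_end_io(struct request *req, blk_status_t error)
{
	struct my_ctx *ctx = req->end_io_data;	/* hypothetical per-command context */

	/* record the status and wake the submitter; it still owns req */
	ctx->status = error;
	complete(&ctx->done);
	return RQ_END_IO_NONE;	/* the submitter frees req itself later */
}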
diff --git a/block/blk-map.c b/block/blk-map.c
index 7693f8e3c454..34735626b00f 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -231,7 +231,7 @@ out_bmd:
return ret;
}
-static void bio_map_put(struct bio *bio)
+static void blk_mq_map_bio_put(struct bio *bio)
{
if (bio->bi_opf & REQ_ALLOC_CACHE) {
bio_put(bio);
@@ -241,17 +241,10 @@ static void bio_map_put(struct bio *bio)
}
}
-static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
- gfp_t gfp_mask)
+static struct bio *blk_rq_map_bio_alloc(struct request *rq,
+ unsigned int nr_vecs, gfp_t gfp_mask)
{
- unsigned int max_sectors = queue_max_hw_sectors(rq->q);
- unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
struct bio *bio;
- int ret;
- int j;
-
- if (!iov_iter_count(iter))
- return -EINVAL;
if (rq->cmd_flags & REQ_POLLED) {
blk_opf_t opf = rq->cmd_flags | REQ_ALLOC_CACHE;
@@ -259,13 +252,31 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask,
&fs_bio_set);
if (!bio)
- return -ENOMEM;
+ return NULL;
} else {
bio = bio_kmalloc(nr_vecs, gfp_mask);
if (!bio)
- return -ENOMEM;
+ return NULL;
bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
}
+ return bio;
+}
+
+static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
+ gfp_t gfp_mask)
+{
+ unsigned int max_sectors = queue_max_hw_sectors(rq->q);
+ unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
+ struct bio *bio;
+ int ret;
+ int j;
+
+ if (!iov_iter_count(iter))
+ return -EINVAL;
+
+ bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
+ if (bio == NULL)
+ return -ENOMEM;
while (iov_iter_count(iter)) {
struct page **pages, *stack_pages[UIO_FASTIOV];
@@ -331,7 +342,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
out_unmap:
bio_release_pages(bio, false);
- bio_map_put(bio);
+ blk_mq_map_bio_put(bio);
return ret;
}
@@ -537,6 +548,62 @@ int blk_rq_append_bio(struct request *rq, struct bio *bio)
}
EXPORT_SYMBOL(blk_rq_append_bio);
+/* Prepare bio for passthrough IO given ITER_BVEC iter */
+static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
+{
+ struct request_queue *q = rq->q;
+ size_t nr_iter = iov_iter_count(iter);
+ size_t nr_segs = iter->nr_segs;
+ struct bio_vec *bvecs, *bvprvp = NULL;
+ struct queue_limits *lim = &q->limits;
+ unsigned int nsegs = 0, bytes = 0;
+ struct bio *bio;
+ size_t i;
+
+ if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
+ return -EINVAL;
+ if (nr_segs > queue_max_segments(q))
+ return -EINVAL;
+
+ /* no iovecs to alloc, as we already have a BVEC iterator */
+ bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
+ if (bio == NULL)
+ return -ENOMEM;
+
+ bio_iov_bvec_set(bio, (struct iov_iter *)iter);
+ blk_rq_bio_prep(rq, bio, nr_segs);
+
+ /* loop to perform a bunch of sanity checks */
+ bvecs = (struct bio_vec *)iter->bvec;
+ for (i = 0; i < nr_segs; i++) {
+ struct bio_vec *bv = &bvecs[i];
+
+ /*
+ * If the queue doesn't support SG gaps and adding this
+ * offset would create a gap, fallback to copy.
+ */
+ if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
+ blk_mq_map_bio_put(bio);
+ return -EREMOTEIO;
+ }
+ /* check full condition */
+ if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
+ goto put_bio;
+ if (bytes + bv->bv_len > nr_iter)
+ goto put_bio;
+ if (bv->bv_offset + bv->bv_len > PAGE_SIZE)
+ goto put_bio;
+
+ nsegs++;
+ bytes += bv->bv_len;
+ bvprvp = bv;
+ }
+ return 0;
+put_bio:
+ blk_mq_map_bio_put(bio);
+ return -EINVAL;
+}
+
/**
* blk_rq_map_user_iov - map user data to a request, for passthrough requests
* @q: request queue where request should be inserted
@@ -556,24 +623,35 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data,
const struct iov_iter *iter, gfp_t gfp_mask)
{
- bool copy = false;
+ bool copy = false, map_bvec = false;
unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
struct bio *bio = NULL;
struct iov_iter i;
int ret = -EINVAL;
- if (!iter_is_iovec(iter))
- goto fail;
-
if (map_data)
copy = true;
else if (blk_queue_may_bounce(q))
copy = true;
else if (iov_iter_alignment(iter) & align)
copy = true;
+ else if (iov_iter_is_bvec(iter))
+ map_bvec = true;
+ else if (!iter_is_iovec(iter))
+ copy = true;
else if (queue_virt_boundary(q))
copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
+ if (map_bvec) {
+ ret = blk_rq_map_user_bvec(rq, iter);
+ if (!ret)
+ return 0;
+ if (ret != -EREMOTEIO)
+ goto fail;
+ /* fall back to copying the data on limits mismatches */
+ copy = true;
+ }
+
i = *iter;
do {
if (copy)
@@ -611,6 +689,42 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
}
EXPORT_SYMBOL(blk_rq_map_user);
+int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data,
+ void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask,
+ bool vec, int iov_count, bool check_iter_count, int rw)
+{
+ int ret = 0;
+
+ if (vec) {
+ struct iovec fast_iov[UIO_FASTIOV];
+ struct iovec *iov = fast_iov;
+ struct iov_iter iter;
+
+ ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len,
+ UIO_FASTIOV, &iov, &iter);
+ if (ret < 0)
+ return ret;
+
+ if (iov_count) {
+ /* SG_IO howto says that the shorter of the two wins */
+ iov_iter_truncate(&iter, buf_len);
+ if (check_iter_count && !iov_iter_count(&iter)) {
+ kfree(iov);
+ return -EINVAL;
+ }
+ }
+
+ ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
+ gfp_mask);
+ kfree(iov);
+ } else if (buf_len) {
+ ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
+ gfp_mask);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(blk_rq_map_user_io);
+
/**
* blk_rq_unmap_user - unmap a request with user data
* @bio: start of bio list
@@ -636,7 +750,7 @@ int blk_rq_unmap_user(struct bio *bio)
next_bio = bio;
bio = bio->bi_next;
- bio_map_put(next_bio);
+ blk_mq_map_bio_put(next_bio);
}
return ret;
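A hedged usage sketch of the new blk_rq_map_user_io() helper, mirroring the SCSI callers converted later in this series; rq and hdr are illustrative. The bool selects the iovec path, iov_count gives the number of iovec entries, buf_len caps the transfer, and rw is the direction handed to import_iovec().

	ret = blk_rq_map_user_io(rq, NULL,		/* no rq_map_data: map, don't copy */
			hdr->dxferp, hdr->dxfer_len,	/* user pointer and byte limit */
			GFP_KERNEL,
			hdr->iovec_count && hdr->dxfer_len,	/* vec: dxferp is an iovec array */
			hdr->iovec_count,		/* iovec entries (0: plain buffer) */
			false,				/* don't reject an empty iterator */
			rq_data_dir(rq));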
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 83492d942348..8070b6c10e8d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -510,25 +510,87 @@ retry:
alloc_time_ns);
}
-struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
- blk_mq_req_flags_t flags)
+static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
+ struct blk_plug *plug,
+ blk_opf_t opf,
+ blk_mq_req_flags_t flags)
{
struct blk_mq_alloc_data data = {
.q = q,
.flags = flags,
.cmd_flags = opf,
- .nr_tags = 1,
+ .nr_tags = plug->nr_ios,
+ .cached_rq = &plug->cached_rq,
};
struct request *rq;
- int ret;
- ret = blk_queue_enter(q, flags);
- if (ret)
- return ERR_PTR(ret);
+ if (blk_queue_enter(q, flags))
+ return NULL;
+
+ plug->nr_ios = 1;
rq = __blk_mq_alloc_requests(&data);
- if (!rq)
- goto out_queue_exit;
+ if (unlikely(!rq))
+ blk_queue_exit(q);
+ return rq;
+}
+
+static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
+ blk_opf_t opf,
+ blk_mq_req_flags_t flags)
+{
+ struct blk_plug *plug = current->plug;
+ struct request *rq;
+
+ if (!plug)
+ return NULL;
+ if (rq_list_empty(plug->cached_rq)) {
+ if (plug->nr_ios == 1)
+ return NULL;
+ rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
+ if (rq)
+ goto got_it;
+ return NULL;
+ }
+ rq = rq_list_peek(&plug->cached_rq);
+ if (!rq || rq->q != q)
+ return NULL;
+
+ if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
+ return NULL;
+ if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
+ return NULL;
+
+ plug->cached_rq = rq_list_next(rq);
+got_it:
+ rq->cmd_flags = opf;
+ INIT_LIST_HEAD(&rq->queuelist);
+ return rq;
+}
+
+struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
+ blk_mq_req_flags_t flags)
+{
+ struct request *rq;
+
+ rq = blk_mq_alloc_cached_request(q, opf, flags);
+ if (!rq) {
+ struct blk_mq_alloc_data data = {
+ .q = q,
+ .flags = flags,
+ .cmd_flags = opf,
+ .nr_tags = 1,
+ };
+ int ret;
+
+ ret = blk_queue_enter(q, flags);
+ if (ret)
+ return ERR_PTR(ret);
+
+ rq = __blk_mq_alloc_requests(&data);
+ if (!rq)
+ goto out_queue_exit;
+ }
rq->__data_len = 0;
rq->__sector = (sector_t) -1;
rq->bio = rq->biotail = NULL;
@@ -761,8 +823,10 @@ static void blk_complete_request(struct request *req)
* can find how many bytes remain in the request
* later.
*/
- req->bio = NULL;
- req->__data_len = 0;
+ if (!req->end_io) {
+ req->bio = NULL;
+ req->__data_len = 0;
+ }
}
/**
@@ -939,7 +1003,8 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
if (rq->end_io) {
rq_qos_done(rq->q, rq);
- rq->end_io(rq, error);
+ if (rq->end_io(rq, error) == RQ_END_IO_FREE)
+ blk_mq_free_request(rq);
} else {
blk_mq_free_request(rq);
}
@@ -992,6 +1057,13 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
rq_qos_done(rq->q, rq);
+ /*
+ * If end_io handler returns NONE, then it still has
+ * ownership of the request.
+ */
+ if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
+ continue;
+
WRITE_ONCE(rq->state, MQ_RQ_IDLE);
if (!req_ref_put_and_test(rq))
continue;
@@ -1233,12 +1305,13 @@ struct blk_rq_wait {
blk_status_t ret;
};
-static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
+static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
{
struct blk_rq_wait *wait = rq->end_io_data;
wait->ret = ret;
complete(&wait->done);
+ return RQ_END_IO_NONE;
}
bool blk_rq_is_poll(struct request *rq)
@@ -1472,10 +1545,12 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
void blk_mq_put_rq_ref(struct request *rq)
{
- if (is_flush_rq(rq))
- rq->end_io(rq, 0);
- else if (req_ref_put_and_test(rq))
+ if (is_flush_rq(rq)) {
+ if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
+ blk_mq_free_request(rq);
+ } else if (req_ref_put_and_test(rq)) {
__blk_mq_free_request(rq);
+ }
}
static bool blk_mq_check_expired(struct request *rq, void *priv)
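A hedged sketch of how a submitter can exploit the new plug-backed request cache: pre-sizing the plug with blk_start_plug_nr_ios() lets consecutive blk_mq_alloc_request() calls be served from plug->cached_rq instead of taking the tag allocator path each time. q, nr_cmds and the command setup step are illustrative assumptions.

	struct blk_plug plug;
	int i;

	blk_start_plug_nr_ios(&plug, nr_cmds);	/* size the cached-request batch */
	for (i = 0; i < nr_cmds; i++) {
		struct request *rq;

		rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
		if (IS_ERR(rq))
			break;
		/* fill in the passthrough command, then blk_execute_rq_nowait() */
	}
	blk_finish_plug(&plug);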
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 4f49bbcce4f1..3001b10a3fbf 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -292,11 +292,13 @@ static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
dm_complete_request(rq, error);
}
-static void end_clone_request(struct request *clone, blk_status_t error)
+static enum rq_end_io_ret end_clone_request(struct request *clone,
+ blk_status_t error)
{
struct dm_rq_target_io *tio = clone->end_io_data;
dm_complete_request(tio->orig, error);
+ return RQ_END_IO_NONE;
}
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 64f599a64a7f..059737c1a2c1 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1172,7 +1172,8 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
}
-static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
+static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
+ blk_status_t status)
{
struct nvme_ctrl *ctrl = rq->end_io_data;
unsigned long flags;
@@ -1184,7 +1185,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
dev_err(ctrl->device,
"failed nvme_keep_alive_end_io error=%d\n",
status);
- return;
+ return RQ_END_IO_NONE;
}
ctrl->comp_seen = false;
@@ -1195,6 +1196,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
spin_unlock_irqrestore(&ctrl->lock, flags);
if (startka)
nvme_queue_keep_alive_work(ctrl);
+ return RQ_END_IO_NONE;
}
static void nvme_keep_alive_work(struct work_struct *work)
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 357791ff0623..81f5550b670d 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -20,19 +20,20 @@ static void __user *nvme_to_user_ptr(uintptr_t ptrval)
return (void __user *)ptrval;
}
-static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
- unsigned len, u32 seed, bool write)
+static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
+ unsigned len, u32 seed)
{
struct bio_integrity_payload *bip;
int ret = -ENOMEM;
void *buf;
+ struct bio *bio = req->bio;
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
goto out;
ret = -EFAULT;
- if (write && copy_from_user(buf, ubuf, len))
+ if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len))
goto out_free_meta;
bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
@@ -45,9 +46,13 @@ static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
bip->bip_iter.bi_sector = seed;
ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
offset_in_page(buf));
- if (ret == len)
- return buf;
- ret = -ENOMEM;
+ if (ret != len) {
+ ret = -ENOMEM;
+ goto out_free_meta;
+ }
+
+ req->cmd_flags |= REQ_INTEGRITY;
+ return buf;
out_free_meta:
kfree(buf);
out:
@@ -65,74 +70,76 @@ static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
}
static struct request *nvme_alloc_user_request(struct request_queue *q,
- struct nvme_command *cmd, void __user *ubuffer,
- unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- u32 meta_seed, void **metap, unsigned timeout, bool vec,
- blk_opf_t rq_flags, blk_mq_req_flags_t blk_flags)
+ struct nvme_command *cmd, blk_opf_t rq_flags,
+ blk_mq_req_flags_t blk_flags)
{
- bool write = nvme_is_write(cmd);
- struct nvme_ns *ns = q->queuedata;
- struct block_device *bdev = ns ? ns->disk->part0 : NULL;
struct request *req;
- struct bio *bio = NULL;
- void *meta = NULL;
- int ret;
req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
if (IS_ERR(req))
return req;
nvme_init_request(req, cmd);
-
- if (timeout)
- req->timeout = timeout;
nvme_req(req)->flags |= NVME_REQ_USERCMD;
+ return req;
+}
- if (ubuffer && bufflen) {
- if (!vec)
- ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
- GFP_KERNEL);
- else {
- struct iovec fast_iov[UIO_FASTIOV];
- struct iovec *iov = fast_iov;
- struct iov_iter iter;
-
- ret = import_iovec(rq_data_dir(req), ubuffer, bufflen,
- UIO_FASTIOV, &iov, &iter);
- if (ret < 0)
- goto out;
- ret = blk_rq_map_user_iov(q, req, NULL, &iter,
- GFP_KERNEL);
- kfree(iov);
- }
- if (ret)
+static int nvme_map_user_request(struct request *req, u64 ubuffer,
+ unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
+ u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
+ bool vec)
+{
+ struct request_queue *q = req->q;
+ struct nvme_ns *ns = q->queuedata;
+ struct block_device *bdev = ns ? ns->disk->part0 : NULL;
+ struct bio *bio = NULL;
+ void *meta = NULL;
+ int ret;
+
+ if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
+ struct iov_iter iter;
+
+ /* fixedbufs is only for non-vectored io */
+ if (WARN_ON_ONCE(vec))
+ return -EINVAL;
+ ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
+ rq_data_dir(req), &iter, ioucmd);
+ if (ret < 0)
goto out;
- bio = req->bio;
- if (bdev)
- bio_set_dev(bio, bdev);
- if (bdev && meta_buffer && meta_len) {
- meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
- meta_seed, write);
- if (IS_ERR(meta)) {
- ret = PTR_ERR(meta);
- goto out_unmap;
- }
- req->cmd_flags |= REQ_INTEGRITY;
- *metap = meta;
+ ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
+ } else {
+ ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
+ bufflen, GFP_KERNEL, vec, 0, 0,
+ rq_data_dir(req));
+ }
+
+ if (ret)
+ goto out;
+ bio = req->bio;
+ if (bdev)
+ bio_set_dev(bio, bdev);
+
+ if (bdev && meta_buffer && meta_len) {
+ meta = nvme_add_user_metadata(req, meta_buffer, meta_len,
+ meta_seed);
+ if (IS_ERR(meta)) {
+ ret = PTR_ERR(meta);
+ goto out_unmap;
}
+ *metap = meta;
}
- return req;
+ return ret;
out_unmap:
if (bio)
blk_rq_unmap_user(bio);
out:
blk_mq_free_request(req);
- return ERR_PTR(ret);
+ return ret;
}
static int nvme_submit_user_cmd(struct request_queue *q,
- struct nvme_command *cmd, void __user *ubuffer,
+ struct nvme_command *cmd, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
u32 meta_seed, u64 *result, unsigned timeout, bool vec)
{
@@ -143,11 +150,18 @@ static int nvme_submit_user_cmd(struct request_queue *q,
u32 effects;
int ret;
- req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
- meta_len, meta_seed, &meta, timeout, vec, 0, 0);
+ req = nvme_alloc_user_request(q, cmd, 0, 0);
if (IS_ERR(req))
return PTR_ERR(req);
+ req->timeout = timeout;
+ if (ubuffer && bufflen) {
+ ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
+ meta_len, meta_seed, &meta, NULL, vec);
+ if (ret)
+ return ret;
+ }
+
bio = req->bio;
ctrl = nvme_req(req)->ctrl;
@@ -227,7 +241,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.appmask = cpu_to_le16(io.appmask);
return nvme_submit_user_cmd(ns->queue, &c,
- nvme_to_user_ptr(io.addr), length,
+ io.addr, length,
metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
false);
}
@@ -281,7 +295,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
timeout = msecs_to_jiffies(cmd.timeout_ms);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
- nvme_to_user_ptr(cmd.addr), cmd.data_len,
+ cmd.addr, cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &result, timeout, false);
@@ -327,7 +341,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
timeout = msecs_to_jiffies(cmd.timeout_ms);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
- nvme_to_user_ptr(cmd.addr), cmd.data_len,
+ cmd.addr, cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &cmd.result, timeout, vec);
@@ -356,9 +370,15 @@ struct nvme_uring_cmd_pdu {
struct bio *bio;
struct request *req;
};
- void *meta; /* kernel-resident buffer */
- void __user *meta_buffer;
u32 meta_len;
+ u32 nvme_status;
+ union {
+ struct {
+ void *meta; /* kernel-resident buffer */
+ void __user *meta_buffer;
+ };
+ u64 result;
+ } u;
};
static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
@@ -367,11 +387,10 @@ static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}
-static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
+static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd)
{
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
struct request *req = pdu->req;
- struct bio *bio = req->bio;
int status;
u64 result;
@@ -382,26 +401,39 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
result = le64_to_cpu(nvme_req(req)->result.u64);
- if (pdu->meta)
- status = nvme_finish_user_metadata(req, pdu->meta_buffer,
- pdu->meta, pdu->meta_len, status);
- if (bio)
- blk_rq_unmap_user(bio);
+ if (pdu->meta_len)
+ status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
+ pdu->u.meta, pdu->meta_len, status);
+ if (req->bio)
+ blk_rq_unmap_user(req->bio);
blk_mq_free_request(req);
io_uring_cmd_done(ioucmd, status, result);
}
-static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
+static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
+{
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+
+ if (pdu->bio)
+ blk_rq_unmap_user(pdu->bio);
+
+ io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result);
+}
+
+static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+ blk_status_t err)
{
struct io_uring_cmd *ioucmd = req->end_io_data;
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
- /* extract bio before reusing the same field for request */
- struct bio *bio = pdu->bio;
void *cookie = READ_ONCE(ioucmd->cookie);
- pdu->req = req;
- req->bio = bio;
+ req->bio = pdu->bio;
+ if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
+ pdu->nvme_status = -EINTR;
+ else
+ pdu->nvme_status = nvme_req(req)->status;
+ pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
/*
* For iopoll, complete it directly.
@@ -411,6 +443,30 @@ static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
nvme_uring_task_cb(ioucmd);
else
io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+
+ return RQ_END_IO_FREE;
+}
+
+static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
+ blk_status_t err)
+{
+ struct io_uring_cmd *ioucmd = req->end_io_data;
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ void *cookie = READ_ONCE(ioucmd->cookie);
+
+ req->bio = pdu->bio;
+ pdu->req = req;
+
+ /*
+ * For iopoll, complete it directly.
+ * Otherwise, move the completion to task work.
+ */
+ if (cookie != NULL && blk_rq_is_poll(req))
+ nvme_uring_task_meta_cb(ioucmd);
+ else
+ io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_meta_cb);
+
+ return RQ_END_IO_NONE;
}
static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
@@ -425,6 +481,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
blk_opf_t rq_flags = 0;
blk_mq_req_flags_t blk_flags = 0;
void *meta = NULL;
+ int ret;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -464,15 +521,18 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
rq_flags |= REQ_POLLED;
retry:
- req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr),
- d.data_len, nvme_to_user_ptr(d.metadata),
- d.metadata_len, 0, &meta, d.timeout_ms ?
- msecs_to_jiffies(d.timeout_ms) : 0, vec, rq_flags,
- blk_flags);
+ req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
if (IS_ERR(req))
return PTR_ERR(req);
- req->end_io = nvme_uring_cmd_end_io;
- req->end_io_data = ioucmd;
+ req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;
+
+ if (d.addr && d.data_len) {
+ ret = nvme_map_user_request(req, d.addr,
+ d.data_len, nvme_to_user_ptr(d.metadata),
+ d.metadata_len, 0, &meta, ioucmd, vec);
+ if (ret)
+ return ret;
+ }
if (issue_flags & IO_URING_F_IOPOLL && rq_flags & REQ_POLLED) {
if (unlikely(!req->bio)) {
@@ -487,10 +547,15 @@ retry:
}
/* to free bio on completion, as req->bio will be null at that time */
pdu->bio = req->bio;
- pdu->meta = meta;
- pdu->meta_buffer = nvme_to_user_ptr(d.metadata);
pdu->meta_len = d.metadata_len;
-
+ req->end_io_data = ioucmd;
+ if (pdu->meta_len) {
+ pdu->u.meta = meta;
+ pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata);
+ req->end_io = nvme_uring_cmd_end_io_meta;
+ } else {
+ req->end_io = nvme_uring_cmd_end_io;
+ }
blk_execute_rq_nowait(req, false);
return -EIOCBQUEUED;
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 9aafc1ed6439..5b796efa325b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1268,7 +1268,7 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}
-static void abort_endio(struct request *req, blk_status_t error)
+static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error)
{
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
@@ -1276,6 +1276,7 @@ static void abort_endio(struct request *req, blk_status_t error)
"Abort status: 0x%x", nvme_req(req)->status);
atomic_inc(&nvmeq->dev->ctrl.abort_limit);
blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
}
static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
@@ -2447,22 +2448,25 @@ out_unlock:
return result;
}
-static void nvme_del_queue_end(struct request *req, blk_status_t error)
+static enum rq_end_io_ret nvme_del_queue_end(struct request *req,
+ blk_status_t error)
{
struct nvme_queue *nvmeq = req->end_io_data;
blk_mq_free_request(req);
complete(&nvmeq->delete_done);
+ return RQ_END_IO_NONE;
}
-static void nvme_del_cq_end(struct request *req, blk_status_t error)
+static enum rq_end_io_ret nvme_del_cq_end(struct request *req,
+ blk_status_t error)
{
struct nvme_queue *nvmeq = req->end_io_data;
if (error)
set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
- nvme_del_queue_end(req, error);
+ return nvme_del_queue_end(req, error);
}
static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 94d3153bae54..79af5140af8b 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -245,14 +245,15 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
nvme_passthru_end(ctrl, effects, req->cmd, status);
}
-static void nvmet_passthru_req_done(struct request *rq,
- blk_status_t blk_status)
+static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
+ blk_status_t blk_status)
{
struct nvmet_req *req = rq->end_io_data;
req->cqe->result = nvme_req(rq)->result;
nvmet_req_complete(req, nvme_req(rq)->status);
blk_mq_free_request(rq);
+ return RQ_END_IO_NONE;
}
static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 448748e3fba5..786fb963cf3f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2004,9 +2004,11 @@ maybe_retry:
}
}
-static void eh_lock_door_done(struct request *req, blk_status_t status)
+static enum rq_end_io_ret eh_lock_door_done(struct request *req,
+ blk_status_t status)
{
blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
}
/**
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 729e309e6034..2d20da55fb64 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -449,25 +449,9 @@ static int sg_io(struct scsi_device *sdev, struct sg_io_hdr *hdr, fmode_t mode)
if (ret < 0)
goto out_put_request;
- ret = 0;
- if (hdr->iovec_count && hdr->dxfer_len) {
- struct iov_iter i;
- struct iovec *iov = NULL;
-
- ret = import_iovec(rq_data_dir(rq), hdr->dxferp,
- hdr->iovec_count, 0, &iov, &i);
- if (ret < 0)
- goto out_put_request;
-
- /* SG_IO howto says that the shorter of the two wins */
- iov_iter_truncate(&i, hdr->dxfer_len);
-
- ret = blk_rq_map_user_iov(rq->q, rq, NULL, &i, GFP_KERNEL);
- kfree(iov);
- } else if (hdr->dxfer_len)
- ret = blk_rq_map_user(rq->q, rq, NULL, hdr->dxferp,
- hdr->dxfer_len, GFP_KERNEL);
-
+ ret = blk_rq_map_user_io(rq, NULL, hdr->dxferp, hdr->dxfer_len,
+ GFP_KERNEL, hdr->iovec_count && hdr->dxfer_len,
+ hdr->iovec_count, 0, rq_data_dir(rq));
if (ret)
goto out_put_request;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 340b050ad28d..ce34a8ad53b4 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -177,7 +177,7 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
} Sg_device;
/* tasklet or soft irq callback */
-static void sg_rq_end_io(struct request *rq, blk_status_t status);
+static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status);
static int sg_start_req(Sg_request *srp, unsigned char *cmd);
static int sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
@@ -1311,7 +1311,7 @@ sg_rq_end_io_usercontext(struct work_struct *work)
* This function is a "bottom half" handler that is called by the mid
* level when a command is completed (or has failed).
*/
-static void
+static enum rq_end_io_ret
sg_rq_end_io(struct request *rq, blk_status_t status)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
@@ -1324,11 +1324,11 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
int result, resid, done = 1;
if (WARN_ON(srp->done != 0))
- return;
+ return RQ_END_IO_NONE;
sfp = srp->parentfp;
if (WARN_ON(sfp == NULL))
- return;
+ return RQ_END_IO_NONE;
sdp = sfp->parentdp;
if (unlikely(atomic_read(&sdp->detaching)))
@@ -1406,6 +1406,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
schedule_work(&srp->ew.work);
}
+ return RQ_END_IO_NONE;
}
static const struct file_operations sg_fops = {
@@ -1803,26 +1804,8 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
md->from_user = 0;
}
- if (iov_count) {
- struct iovec *iov = NULL;
- struct iov_iter i;
-
- res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i);
- if (res < 0)
- return res;
-
- iov_iter_truncate(&i, hp->dxfer_len);
- if (!iov_iter_count(&i)) {
- kfree(iov);
- return -EINVAL;
- }
-
- res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
- kfree(iov);
- } else
- res = blk_rq_map_user(q, rq, md, hp->dxferp,
- hp->dxfer_len, GFP_ATOMIC);
-
+ res = blk_rq_map_user_io(rq, md, hp->dxferp, hp->dxfer_len,
+ GFP_ATOMIC, iov_count, iov_count, 1, rw);
if (!res) {
srp->bio = rq->bio;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 850172a2b8f1..55e7c07ebe4c 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -512,7 +512,8 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
atomic64_dec(&STp->stats->in_flight);
}
-static void st_scsi_execute_end(struct request *req, blk_status_t status)
+static enum rq_end_io_ret st_scsi_execute_end(struct request *req,
+ blk_status_t status)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
struct st_request *SRpnt = req->end_io_data;
@@ -532,6 +533,7 @@ static void st_scsi_execute_end(struct request *req, blk_status_t status)
blk_rq_unmap_user(tmp);
blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
}
static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index e6a967ddc08c..8a7306e5e133 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -39,7 +39,7 @@ static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
}
static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
-static void pscsi_req_done(struct request *, blk_status_t);
+static enum rq_end_io_ret pscsi_req_done(struct request *, blk_status_t);
/* pscsi_attach_hba():
*
@@ -1002,7 +1002,8 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
return 0;
}
-static void pscsi_req_done(struct request *req, blk_status_t status)
+static enum rq_end_io_ret pscsi_req_done(struct request *req,
+ blk_status_t status)
{
struct se_cmd *cmd = req->end_io_data;
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
@@ -1029,6 +1030,7 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
}
blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
}
static const struct target_backend_ops pscsi_ops = {
diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c
index a1a7a1175a5a..3d69a81c5b17 100644
--- a/drivers/ufs/core/ufshpb.c
+++ b/drivers/ufs/core/ufshpb.c
@@ -613,14 +613,17 @@ static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
srgn->srgn_state = HPB_SRGN_VALID;
}
-static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
+static enum rq_end_io_ret ufshpb_umap_req_compl_fn(struct request *req,
+ blk_status_t error)
{
struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
ufshpb_put_req(umap_req->hpb, umap_req);
+ return RQ_END_IO_NONE;
}
-static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
+static enum rq_end_io_ret ufshpb_map_req_compl_fn(struct request *req,
+ blk_status_t error)
{
struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
struct ufshpb_lu *hpb = map_req->hpb;
@@ -636,6 +639,7 @@ static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
ufshpb_put_map_req(map_req->hpb, map_req);
+ return RQ_END_IO_NONE;
}
static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 00a15808c137..ba18e9bdb799 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -14,7 +14,12 @@ struct blk_flush_queue;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_DEFAULT_RQ 128
-typedef void (rq_end_io_fn)(struct request *, blk_status_t);
+enum rq_end_io_ret {
+ RQ_END_IO_NONE,
+ RQ_END_IO_FREE,
+};
+
+typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
/*
* request flags */
@@ -848,8 +853,9 @@ static inline bool blk_mq_add_to_batch(struct request *req,
struct io_comp_batch *iob, int ioerror,
void (*complete)(struct io_comp_batch *))
{
- if (!iob || (req->rq_flags & RQF_ELV) || req->end_io || ioerror)
+ if (!iob || (req->rq_flags & RQF_ELV) || ioerror)
return false;
+
if (!iob->complete)
iob->complete = complete;
else if (iob->complete != complete)
@@ -979,6 +985,8 @@ struct rq_map_data {
int blk_rq_map_user(struct request_queue *, struct request *,
struct rq_map_data *, void __user *, unsigned long, gfp_t);
+int blk_rq_map_user_io(struct request *, struct rq_map_data *,
+ void __user *, unsigned long, gfp_t, bool, int, bool, int);
int blk_rq_map_user_iov(struct request_queue *, struct request *,
struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 58676c0a398f..43bc8a2edccf 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -4,6 +4,7 @@
#include <linux/sched.h>
#include <linux/xarray.h>
+#include <uapi/linux/io_uring.h>
enum io_uring_cmd_flags {
IO_URING_F_COMPLETE_DEFER = 1,
@@ -27,11 +28,13 @@ struct io_uring_cmd {
void *cookie;
};
u32 cmd_op;
- u32 pad;
+ u32 flags;
u8 pdu[32]; /* available inline for free use */
};
#if defined(CONFIG_IO_URING)
+int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, void *ioucmd);
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2);
void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
void (*task_work_cb)(struct io_uring_cmd *));
@@ -59,6 +62,11 @@ static inline void io_uring_free(struct task_struct *tsk)
__io_uring_free(tsk);
}
#else
+static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, void *ioucmd)
+{
+ return -EOPNOTSUPP;
+}
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
ssize_t ret2)
{
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 92f29d9505a6..ab7458033ee3 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -56,6 +56,7 @@ struct io_uring_sqe {
__u32 hardlink_flags;
__u32 xattr_flags;
__u32 msg_ring_flags;
+ __u32 uring_cmd_flags;
};
__u64 user_data; /* data to be passed back at completion time */
/* pack this to avoid bogus arm OABI complaints */
@@ -220,6 +221,14 @@ enum io_uring_op {
};
/*
+ * sqe->uring_cmd_flags
+ * IORING_URING_CMD_FIXED use registered buffer; pass this flag
+ * along with setting sqe->buf_index.
+ */
+#define IORING_URING_CMD_FIXED (1U << 0)
+
+
+/*
* sqe->fsync_flags
*/
#define IORING_FSYNC_DATASYNC (1U << 0)
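A hedged userspace sketch of the new flag: the buffer must already be registered with io_uring (e.g. via io_uring_register_buffers()), and nvme_fd plus the NVME_URING_CMD_IO payload are assumptions about the NVMe char-device passthrough path rather than requirements of the flag itself.

	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = nvme_fd;				/* e.g. an open /dev/ng0n1 */
	sqe->cmd_op = NVME_URING_CMD_IO;		/* driver-defined command opcode */
	sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;	/* use a registered buffer */
	sqe->buf_index = 0;				/* slot in the registered-buffer table */
	/* the command payload itself is placed in sqe->cmd[] */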
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index f3ed61e9bd0f..e50de0b6b9f8 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -4,10 +4,12 @@
#include <linux/file.h>
#include <linux/io_uring.h>
#include <linux/security.h>
+#include <linux/nospec.h>
#include <uapi/linux/io_uring.h>
#include "io_uring.h"
+#include "rsrc.h"
#include "uring_cmd.h"
static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
@@ -76,8 +78,24 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
- if (sqe->rw_flags || sqe->__pad1)
+ if (sqe->__pad1)
return -EINVAL;
+
+ ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
+ if (ioucmd->flags & ~IORING_URING_CMD_FIXED)
+ return -EINVAL;
+
+ if (ioucmd->flags & IORING_URING_CMD_FIXED) {
+ struct io_ring_ctx *ctx = req->ctx;
+ u16 index;
+
+ req->buf_index = READ_ONCE(sqe->buf_index);
+ if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+ return -EFAULT;
+ index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
+ req->imu = ctx->user_bufs[index];
+ io_req_set_rsrc_node(req, ctx, 0);
+ }
ioucmd->cmd = sqe->cmd;
ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
return 0;
@@ -129,3 +147,12 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
return IOU_ISSUE_SKIP_COMPLETE;
}
+
+int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, void *ioucmd)
+{
+ struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+
+ return io_import_fixed(rw, iter, req->imu, ubuf, len);
+}
+EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);