author    Linus Torvalds <torvalds@linux-foundation.org>  2019-08-18 05:39:54 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-08-18 05:39:54 +0300
commit    8fde2832bd0bdc5a2b57330a9e9c3d2fa16bd1d8 (patch)
tree      d25b713c9e9389c9a56938168d19426357abff9c /fs
parent    85d8d3b172eb37b23dcdbe9fa7a85e343642bfea (diff)
parent    a982eeb09b6030e567b8b815277c8c9197168040 (diff)
download  linux-8fde2832bd0bdc5a2b57330a9e9c3d2fa16bd1d8.tar.xz
Merge tag 'for-linus-2019-08-17' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "A collection of fixes that should go into this series. This contains:

   - Revert of the REQ_NOWAIT_INLINE and associated dio changes. There
     were still corner cases there, and even though I had a solution
     for it, it's too involved for this stage. (me)

   - Set of NVMe fixes (via Sagi)

   - io_uring fix for fixed buffers (Aleix)

   - io_uring defer issue fix (Jackie)

   - Regression fix for queue sync at exit time (zhengbin)

   - xen blk-back memory leak fix (Wenwen)"

* tag 'for-linus-2019-08-17' of git://git.kernel.dk/linux-block:
  io_uring: fix an issue when IOSQE_IO_LINK is inserted into defer list
  block: remove REQ_NOWAIT_INLINE
  io_uring: fix manual setup of iov_iter for fixed buffers
  xen/blkback: fix memory leaks
  blk-mq: move cancel of requeue_work to the front of blk_exit_queue
  nvme-pci: Fix async probe remove race
  nvme: fix controller removal race with scan work
  nvme-rdma: fix possible use-after-free in connect error flow
  nvme: fix a possible deadlock when passthru commands sent to a multipath device
  nvme-core: Fix extra device_put() call on error path
  nvmet-file: fix nvmet_file_flush() always returning an error
  nvmet-loop: Flush nvme_delete_wq when removing the port
  nvmet: Fix use-after-free bug when a port is removed
  nvme-multipath: revalidate nvme_ns_head gendisk in nvme_validate_ns
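For readers following the REQ_NOWAIT_INLINE revert below: the userspace contract is unchanged. Nowait direct I/O that would block still fails with EAGAIN; what goes away is only the in-kernel inline delivery (a BLK_QC_T_EAGAIN cookie returned from submit_bio() instead of an error through bio->bi_end_io()). A minimal sketch of that contract from userspace, assuming a placeholder device path:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
        void *buf;
        struct iovec iov;
        ssize_t n;
        int fd;

        /* O_DIRECT requires an aligned buffer. */
        if (posix_memalign(&buf, 4096, 4096))
                return 1;

        fd = open("/dev/sda", O_RDONLY | O_DIRECT);  /* placeholder device */
        if (fd < 0) {
                perror("open");
                return 1;
        }

        iov.iov_base = buf;
        iov.iov_len = 4096;

        /* RWF_NOWAIT: fail with EAGAIN rather than block on resources. */
        n = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
        if (n < 0 && errno == EAGAIN)
                fprintf(stderr, "would block; retry or punt to a worker\n");

        close(fd);
        free(buf);
        return 0;
}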
Diffstat (limited to 'fs')
-rw-r--r--  fs/block_dev.c  49
-rw-r--r--  fs/io_uring.c   20
2 files changed, 15 insertions(+), 54 deletions(-)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index eb657ab94060..677cb364d33f 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -345,24 +345,15 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
struct bio *bio;
bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
bool is_read = (iov_iter_rw(iter) == READ), is_sync;
- bool nowait = (iocb->ki_flags & IOCB_NOWAIT) != 0;
loff_t pos = iocb->ki_pos;
blk_qc_t qc = BLK_QC_T_NONE;
- gfp_t gfp;
- int ret;
+ int ret = 0;
if ((pos | iov_iter_alignment(iter)) &
(bdev_logical_block_size(bdev) - 1))
return -EINVAL;
- if (nowait)
- gfp = GFP_NOWAIT;
- else
- gfp = GFP_KERNEL;
-
- bio = bio_alloc_bioset(gfp, nr_pages, &blkdev_dio_pool);
- if (!bio)
- return -EAGAIN;
+ bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);
dio = container_of(bio, struct blkdev_dio, bio);
dio->is_sync = is_sync = is_sync_kiocb(iocb);
@@ -384,7 +375,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
if (!is_poll)
blk_start_plug(&plug);
- ret = 0;
for (;;) {
bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = pos >> 9;
@@ -409,14 +399,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
task_io_account_write(bio->bi_iter.bi_size);
}
- /*
- * Tell underlying layer to not block for resource shortage.
- * And if we would have blocked, return error inline instead
- * of through the bio->bi_end_io() callback.
- */
- if (nowait)
- bio->bi_opf |= (REQ_NOWAIT | REQ_NOWAIT_INLINE);
-
+ dio->size += bio->bi_iter.bi_size;
pos += bio->bi_iter.bi_size;
nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
@@ -428,13 +411,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
polled = true;
}
- dio->size += bio->bi_iter.bi_size;
qc = submit_bio(bio);
- if (qc == BLK_QC_T_EAGAIN) {
- dio->size -= bio->bi_iter.bi_size;
- ret = -EAGAIN;
- goto error;
- }
if (polled)
WRITE_ONCE(iocb->ki_cookie, qc);
@@ -455,19 +432,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
atomic_inc(&dio->ref);
}
- dio->size += bio->bi_iter.bi_size;
- qc = submit_bio(bio);
- if (qc == BLK_QC_T_EAGAIN) {
- dio->size -= bio->bi_iter.bi_size;
- ret = -EAGAIN;
- goto error;
- }
-
- bio = bio_alloc(gfp, nr_pages);
- if (!bio) {
- ret = -EAGAIN;
- goto error;
- }
+ submit_bio(bio);
+ bio = bio_alloc(GFP_KERNEL, nr_pages);
}
if (!is_poll)
@@ -487,7 +453,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
}
__set_current_state(TASK_RUNNING);
-out:
if (!ret)
ret = blk_status_to_errno(dio->bio.bi_status);
if (likely(!ret))
@@ -495,10 +460,6 @@ out:
bio_put(&dio->bio);
return ret;
-error:
- if (!is_poll)
- blk_finish_plug(&plug);
- goto out;
}
static ssize_t
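A note on the allocation change above: bio_alloc_bioset() is mempool-backed, so with a gfp mask that may sleep it retries until a bio is available and never returns NULL; the !bio check only made sense for the reverted GFP_NOWAIT path. A short sketch of the distinction, assuming kernel context (not compilable standalone):

        struct bio *bio;

        /* Sleeping mask + bioset: the mempool guarantees forward
         * progress, so the return value is never NULL. */
        bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);

        /* Non-sleeping mask (the reverted nowait path): allocation can
         * fail, so a NULL check and an -EAGAIN return were required. */
        bio = bio_alloc_bioset(GFP_NOWAIT, nr_pages, &blkdev_dio_pool);
        if (!bio)
                return -EAGAIN;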
diff --git a/fs/io_uring.c b/fs/io_uring.c
index d542f1cf4428..24bbe3cb7ad4 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1097,10 +1097,8 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
iter->bvec = bvec + seg_skip;
iter->nr_segs -= seg_skip;
- iter->count -= (seg_skip << PAGE_SHIFT);
+ iter->count -= bvec->bv_len + offset;
iter->iov_offset = offset & ~PAGE_MASK;
- if (iter->iov_offset)
- iter->count -= iter->iov_offset;
}
}
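For context on this hunk: the first bio_vec of a registered buffer can be shorter than PAGE_SIZE when the buffer is not page-aligned, but the old arithmetic charged a full page for it (seg_skip << PAGE_SHIFT) and then subtracted iov_offset a second time, shrinking the iterator and truncating the I/O. A small standalone sketch of the corrected bookkeeping with made-up sizes, assuming the unchanged surrounding logic of io_import_fixed() (offset -= bvec->bv_len; seg_skip = 1 + (offset >> PAGE_SHIFT); not shown in this hunk):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        /* Hypothetical registered buffer starting 3072 bytes into a
         * page, so bvec[0] covers only 1024 bytes; an 8 KiB read
         * starts 7168 bytes into the buffer. */
        unsigned long bv_len0 = 1024;           /* first bio_vec length */
        unsigned long offset  = 7168;           /* I/O start in buffer  */
        unsigned long count   = offset + 8192;  /* as set by iov_iter_bvec */

        /* Unchanged context in io_import_fixed(): skip the first,
         * possibly short, segment, then whole PAGE_SIZE segments. */
        offset -= bv_len0;
        unsigned long seg_skip = 1 + (offset >> PAGE_SHIFT);

        /* The fix: subtract the bytes actually skipped (bv_len0 plus
         * the remaining offset), not seg_skip << PAGE_SHIFT, which
         * charged a full page for the short first segment. */
        count -= bv_len0 + offset;
        unsigned long iov_offset = offset & ~PAGE_MASK;

        /* Prints seg_skip=2 count=8192 iov_offset=2048: exactly the
         * requested 8 KiB remain. The old math left count at 5120. */
        printf("seg_skip=%lu count=%lu iov_offset=%lu\n",
               seg_skip, count, iov_offset);
        return 0;
}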
@@ -2025,6 +2023,15 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
{
int ret;
+ ret = io_req_defer(ctx, req, s->sqe);
+ if (ret) {
+ if (ret != -EIOCBQUEUED) {
+ io_free_req(req);
+ io_cqring_add_event(ctx, s->sqe->user_data, ret);
+ }
+ return 0;
+ }
+
ret = __io_submit_sqe(ctx, req, s, true);
if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
struct io_uring_sqe *sqe_copy;
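Reading this hunk together with the removal below: the deferral check moves from io_submit_sqe() into io_queue_sqe(), so requests arriving through the IOSQE_IO_LINK path are also tested against the defer list before being issued. A deferral failure completes the request with an error CQE and still returns 0, so the submitter does not treat it as a submission error. A condensed sketch of the resulting flow, with the unchanged -EAGAIN/async-punt handling elided:

static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
                        struct sqe_submit *s)
{
        int ret;

        /* Defer first: now covers linked and unlinked requests alike. */
        ret = io_req_defer(ctx, req, s->sqe);
        if (ret) {
                if (ret != -EIOCBQUEUED) {
                        /* Could not defer: complete with an error CQE. */
                        io_free_req(req);
                        io_cqring_add_event(ctx, s->sqe->user_data, ret);
                }
                return 0;       /* deferred or completed; not a submit error */
        }

        ret = __io_submit_sqe(ctx, req, s, true);
        /* ... -EAGAIN handling / async punt unchanged (see hunk above) ... */
        return ret;
}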
@@ -2097,13 +2104,6 @@ err:
return;
}
- ret = io_req_defer(ctx, req, s->sqe);
- if (ret) {
- if (ret != -EIOCBQUEUED)
- goto err_req;
- return;
- }
-
/*
* If we already have a head request, queue this one for async
* submittal once the head completes. If we don't have a head but