author    Ming Lei <ming.lei@redhat.com>  2019-03-03 16:17:48 +0300
committer Jens Axboe <axboe@kernel.dk>    2019-03-06 19:42:54 +0300
commit    05b700ba6003fd98c41314f390df36e2b893e167 (patch)
tree      ea1ae0f1e8ab9a6f4a4085e47842910043793377 /block/blk-merge.c
parent    e61750c84701310f5a99e1c2e59d77aad5f1da78 (diff)
block: fix segment calculation for passthrough IO
blk_recount_segments() can be called from bio_add_pc_page() to calculate how many segments this bio will have after one page is added to it. If the resulting segment count exceeds the queue limit, the added page is removed again. This try-and-fix policy requires blk_recount_segments() (__blk_recalc_rq_segments) to not consider the segment count limit.

Unfortunately bvec_split_segs() does check this limit, so a too-small segment count is returned to bio_add_pc_page(), and the page may still be added to the bio even though the segment count limit is already broken.

Fix this issue by not considering the segment count limit when calculating the bio's segment count.

Fixes: dcebd755926b ("block: use bio_for_each_bvec() to compute multi-page bvec count")
Cc: Christoph Hellwig <hch@lst.de>
Cc: Omar Sandoval <osandov@fb.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
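To make the failure mode concrete, here is a minimal userspace sketch of why a clamped count breaks the try-and-fix policy. Every name and type below (count_segs, struct queue, the one-segment-per-4K assumption) is a simplified stand-in for illustration, not the kernel API:

    /* Sketch: a split helper that clamps its count to max_segs, as
     * bvec_split_segs() did before this patch, under-reports how many
     * segments a multi-page bvec really needs. */
    #include <stdio.h>
    #include <limits.h>

    struct queue { unsigned max_segs; };

    /* Stand-in for bvec_split_segs(): counts segments one bvec adds,
     * stopping early once cur_nsegs + new segments reaches max_segs. */
    static unsigned count_segs(unsigned bvec_len, unsigned seg_size,
                               unsigned cur_nsegs, unsigned max_segs)
    {
            unsigned nsegs = 0;

            while (bvec_len && cur_nsegs + nsegs < max_segs) {
                    nsegs++;
                    bvec_len -= (bvec_len < seg_size) ? bvec_len : seg_size;
            }
            return nsegs;
    }

    int main(void)
    {
            struct queue q = { .max_segs = 4 };
            unsigned cur_nsegs = 3;        /* segments already in the bio */
            unsigned bvec_len = 3 * 4096;  /* new bvec really needs 3 segments */

            /* Clamped: reports only 1 new segment, so the caller sees
             * 3 + 1 <= 4 and keeps the page, though 3 + 3 > 4. */
            unsigned clamped = count_segs(bvec_len, 4096, cur_nsegs, q.max_segs);
            /* Unclamped (max_segs = UINT_MAX): the real count, 3. */
            unsigned real = count_segs(bvec_len, 4096, cur_nsegs, UINT_MAX);

            printf("clamped count: %u, real count: %u\n", clamped, real);
            return 0;
    }

This prints "clamped count: 1, real count: 3": the clamped recount lets the caller conclude the limit still holds when it is already exceeded.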
Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--  block/blk-merge.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 22467f475ab4..1c9d4f0f96ea 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -180,7 +180,7 @@ static unsigned get_max_segment_size(struct request_queue *q,
*/
static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
unsigned *nsegs, unsigned *last_seg_size,
- unsigned *front_seg_size, unsigned *sectors)
+ unsigned *front_seg_size, unsigned *sectors, unsigned max_segs)
{
unsigned len = bv->bv_len;
unsigned total_len = 0;
@@ -190,7 +190,7 @@ static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
* Multi-page bvec may be too big to hold in one segment, so the
* current bvec has to be splitted as multiple segments.
*/
- while (len && new_nsegs + *nsegs < queue_max_segments(q)) {
+ while (len && new_nsegs + *nsegs < max_segs) {
seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
seg_size = min(seg_size, len);
@@ -240,6 +240,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
bool do_split = true;
struct bio *new = NULL;
const unsigned max_sectors = get_max_io_size(q, bio);
+ const unsigned max_segs = queue_max_segments(q);
bio_for_each_bvec(bv, bio, iter) {
/*
@@ -254,14 +255,14 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
* Consider this a new segment if we're splitting in
* the middle of this vector.
*/
- if (nsegs < queue_max_segments(q) &&
+ if (nsegs < max_segs &&
sectors < max_sectors) {
/* split in the middle of bvec */
bv.bv_len = (max_sectors - sectors) << 9;
bvec_split_segs(q, &bv, &nsegs,
&seg_size,
&front_seg_size,
- &sectors);
+ &sectors, max_segs);
}
goto split;
}
@@ -283,7 +284,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
continue;
}
new_segment:
- if (nsegs == queue_max_segments(q))
+ if (nsegs == max_segs)
goto split;
bvprv = bv;
@@ -296,7 +297,7 @@ new_segment:
if (nsegs == 1 && seg_size > front_seg_size)
front_seg_size = seg_size;
} else if (bvec_split_segs(q, &bv, &nsegs, &seg_size,
- &front_seg_size, &sectors)) {
+ &front_seg_size, &sectors, max_segs)) {
goto split;
}
}
@@ -415,7 +416,7 @@ new_segment:
bvprv = bv;
prev = 1;
bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
- &front_seg_size, NULL);
+ &front_seg_size, NULL, UINT_MAX);
}
bbio = bio;
}
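The last hunk is the key to the fix: __blk_recalc_rq_segments() now passes UINT_MAX as max_segs, so the recount path never clamps, and the caller's own limit check does the enforcement. A hedged sketch of that try-and-fix consumer pattern (simplified stand-ins, not the actual bio_add_pc_page() code):

    #include <stdio.h>
    #include <stdbool.h>

    struct queue { unsigned max_segs; };
    struct fake_bio { unsigned nr_pages; };

    /* Stand-in for blk_recount_segments(); with the fix it returns the
     * real, unclamped count. Here: pretend one page == one segment. */
    static unsigned recount_segments(struct fake_bio *bio)
    {
            return bio->nr_pages;
    }

    /* Try-and-fix: tentatively add the page, recount, back out on overflow.
     * This check only works if the recount is NOT clamped to max_segs. */
    static bool add_pc_page(struct queue *q, struct fake_bio *bio)
    {
            bio->nr_pages++;                            /* try */
            if (recount_segments(bio) > q->max_segs) {
                    bio->nr_pages--;                    /* fix */
                    return false;
            }
            return true;
    }

    int main(void)
    {
            struct queue q = { .max_segs = 2 };
            struct fake_bio bio = { .nr_pages = 0 };

            /* Third add must fail once the 2-segment limit is reached. */
            printf("%d %d %d\n", add_pc_page(&q, &bio),
                   add_pc_page(&q, &bio), add_pc_page(&q, &bio));
            return 0;
    }

This prints "1 1 0": with an honest recount, the overflowing add is detected and rolled back, which is exactly the invariant the clamped count was silently violating.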