author	Christoph Hellwig <hch@lst.de>	2019-06-06 13:29:03 +0300
committer	Jens Axboe <axboe@kernel.dk>	2019-06-20 19:29:22 +0300
commit	d627065d88469933bc1527f97c539c464482f0bb (patch)
tree	1181e2d38ccd8e9340645ce2205456e88be58a6f /block/blk-merge.c
parent	e9cd19c0c198aa1c893e142b015fde6da862ed52 (diff)
download	linux-d627065d88469933bc1527f97c539c464482f0bb.tar.xz
block: untangle the end of blk_bio_segment_split
Now that we don't need to assign the front/back segment sizes, we can duplicate the segs assignment for the split vs no-split case and remove a whole chunk of boilerplate code.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--	block/blk-merge.c	14
1 file changed, 3 insertions(+), 11 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 2ea21ffd5f72..ca45eb51c669 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -202,8 +202,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	struct bio_vec bv, bvprv, *bvprvp = NULL;
 	struct bvec_iter iter;
 	unsigned nsegs = 0, sectors = 0;
-	bool do_split = true;
-	struct bio *new = NULL;
 	const unsigned max_sectors = get_max_io_size(q, bio);
 	const unsigned max_segs = queue_max_segments(q);
@@ -245,17 +243,11 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 		}
 	}
-	do_split = false;
+	*segs = nsegs;
+	return NULL;
 split:
 	*segs = nsegs;
-
-	if (do_split) {
-		new = bio_split(bio, sectors, GFP_NOIO, bs);
-		if (new)
-			bio = new;
-	}
-
-	return do_split ? new : NULL;
+	return bio_split(bio, sectors, GFP_NOIO, bs);
 }
 void __blk_queue_split(struct request_queue *q, struct bio **bio,
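
For readability, here is a sketch of how the tail of blk_bio_segment_split() reads once this patch is applied, reconstructed from the hunks above. The parameter names after the first one and the elided bvec-walking loop are not shown in this diff and are assumed context, not part of the change itself.

/*
 * Sketch of blk_bio_segment_split() after this patch, reconstructed from
 * the hunks above. Parameters other than 'q' and the elided loop body are
 * assumed context.
 */
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, sectors = 0;
	const unsigned max_sectors = get_max_io_size(q, bio);
	const unsigned max_segs = queue_max_segments(q);

	/*
	 * ... walk the bio's bvecs, jumping to the split label once the
	 * segment or sector limits are exceeded ...
	 */

	/* No split needed: report the segment count and return NULL. */
	*segs = nsegs;
	return NULL;
split:
	/* Split needed: same segs assignment, then split the bio. */
	*segs = nsegs;
	return bio_split(bio, sectors, GFP_NOIO, bs);
}

Duplicating the short segs assignment at both exits is what lets the do_split flag and the intermediate new bio variable disappear, leaving each return path self-contained.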