author		Kent Overstreet <kmo@daterainc.com>	2013-11-24 06:21:01 +0400
committer	Kent Overstreet <kmo@daterainc.com>	2013-11-24 10:33:57 +0400
commit		20d0189b1012a37d2533a87fb451f7852f2418d1 (patch)
tree		5ceaa6cfc0e1f1cec423c6c9f5de72d49f2d63a1 /drivers/md/raid0.c
parent		ee67891bf132612feb7b999ee1f3350b40867cb4 (diff)
download	linux-20d0189b1012a37d2533a87fb451f7852f2418d1.tar.xz
block: Introduce new bio_split()
The new bio_split() can split arbitrary bios - it's not restricted to
single page bios, like the old bio_split() (previously renamed to
bio_pair_split()). It also has different semantics - it doesn't
allocate a struct bio_pair, leaving it up to the caller to handle
completions.

Then convert the existing bio_pair_split() users to the new bio_split()
- and also nvme, which was open coding bio splitting.

(We have to take that BUG_ON() out of bio_integrity_trim() because this
bio_split() needs to use it, and there's no reason it has to be used on
bios marked as cloned; BIO_CLONED doesn't seem to have clearly
documented semantics anyways.)

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Neil Brown <neilb@suse.de>
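The calling convention the message describes - split off a prefix,
chain it to the parent, and leave completion handling to the caller -
takes the shape sketched below. This sketch is illustrative only and is
not part of the patch: my_make_request() and my_max_sectors() are
hypothetical stand-ins for a driver's request function and its
per-device limit, while bio_split(), bio_chain(), bio_sectors(),
generic_make_request() and fs_bio_set are the real interfaces of this
kernel era.

	/*
	 * Illustrative sketch (not from the patch): a request function
	 * using the new bio_split()/bio_chain() convention.
	 * my_max_sectors() is a hypothetical per-device limit helper.
	 */
	#include <linux/bio.h>
	#include <linux/blkdev.h>

	static void my_make_request(struct request_queue *q, struct bio *bio)
	{
		struct bio *split;

		do {
			/* Hypothetical: largest prefix the device takes whole. */
			unsigned sectors = my_max_sectors(q, bio);

			if (sectors < bio_sectors(bio)) {
				/*
				 * Split off the front of the bio. Unlike the old
				 * bio_pair_split(), no struct bio_pair is allocated
				 * and multi-segment bios can be split ...
				 */
				split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
				/*
				 * ... and completion is the caller's job: chain the
				 * split to the parent so the parent's endio runs
				 * only after every chained split has completed.
				 */
				bio_chain(split, bio);
			} else {
				/* Remainder fits in one piece; submit as-is. */
				split = bio;
			}

			generic_make_request(split);
		} while (split != bio);
	}

This is the shape the raid0 hunk below takes, with the chunk-boundary
arithmetic standing in for my_max_sectors().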
Diffstat (limited to 'drivers/md/raid0.c')
-rw-r--r--	drivers/md/raid0.c	77
1 file changed, 28 insertions(+), 49 deletions(-)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index ea754dd1a5f5..407a99e46f69 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -513,65 +513,44 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
 
 static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 {
-	unsigned int chunk_sects;
-	sector_t sector_offset;
 	struct strip_zone *zone;
 	struct md_rdev *tmp_dev;
+	struct bio *split;
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bio);
 		return;
 	}
 
-	chunk_sects = mddev->chunk_sectors;
-	if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
+	do {
 		sector_t sector = bio->bi_iter.bi_sector;
-		struct bio_pair *bp;
-		/* Sanity check -- queue functions should prevent this happening */
-		if (bio_multiple_segments(bio))
-			goto bad_map;
-		/* This is a one page bio that upper layers
-		 * refuse to split for us, so we need to split it.
-		 */
-		if (likely(is_power_of_2(chunk_sects)))
-			bp = bio_pair_split(bio, chunk_sects - (sector &
-							       (chunk_sects-1)));
-		else
-			bp = bio_pair_split(bio, chunk_sects -
-					    sector_div(sector, chunk_sects));
-		raid0_make_request(mddev, &bp->bio1);
-		raid0_make_request(mddev, &bp->bio2);
-		bio_pair_release(bp);
-		return;
-	}
-
-	sector_offset = bio->bi_iter.bi_sector;
-	zone = find_zone(mddev->private, &sector_offset);
-	tmp_dev = map_sector(mddev, zone, bio->bi_iter.bi_sector,
-			     &sector_offset);
-	bio->bi_bdev = tmp_dev->bdev;
-	bio->bi_iter.bi_sector = sector_offset + zone->dev_start +
-		tmp_dev->data_offset;
-
-	if (unlikely((bio->bi_rw & REQ_DISCARD) &&
-		     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
-		/* Just ignore it */
-		bio_endio(bio, 0);
-		return;
-	}
-
-	generic_make_request(bio);
-	return;
-
-bad_map:
-	printk("md/raid0:%s: make_request bug: can't convert block across chunks"
-	       " or bigger than %dk %llu %d\n",
-	       mdname(mddev), chunk_sects / 2,
-	       (unsigned long long)bio->bi_iter.bi_sector,
-	       bio_sectors(bio) / 2);
+		unsigned chunk_sects = mddev->chunk_sectors;
+
+		unsigned sectors = chunk_sects -
+			(likely(is_power_of_2(chunk_sects))
+			 ? (sector & (chunk_sects-1))
+			 : sector_div(sector, chunk_sects));
+
+		if (sectors < bio_sectors(bio)) {
+			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
+			bio_chain(split, bio);
+		} else {
+			split = bio;
+		}
 
-	bio_io_error(bio);
-	return;
+		zone = find_zone(mddev->private, &sector);
+		tmp_dev = map_sector(mddev, zone, sector, &sector);
+		split->bi_bdev = tmp_dev->bdev;
+		split->bi_iter.bi_sector = sector + zone->dev_start +
+			tmp_dev->data_offset;
+
+		if (unlikely((split->bi_rw & REQ_DISCARD) &&
+			     !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
+			/* Just ignore it */
+			bio_endio(split, 0);
+		} else
+			generic_make_request(split);
+	} while (split != bio);
 }
 
 static void raid0_status(struct seq_file *seq, struct mddev *mddev)