author		David Sterba <dsterba@suse.com>	2017-06-12 18:29:41 +0300
committer	David Sterba <dsterba@suse.com>	2017-06-19 19:26:04 +0300
commit		c5e4c3d7503453832444475641988ffa02b88b6d (patch)
tree		31eecedf58d2ef195db606fefec63fbc5eaf1dfe /fs/btrfs/scrub.c
parent		184f999e12152d1b1f284792ba4e82ef453ce7b7 (diff)
download	linux-c5e4c3d7503453832444475641988ffa02b88b6d.tar.xz
btrfs: sink gfp parameter to btrfs_io_bio_alloc
We can hardcode GFP_NOFS to btrfs_io_bio_alloc, although it means we change it back from GFP_KERNEL in scrub. I'd rather save a few stack bytes by not passing the gfp flags in the remaining, more important, contexts, and the bio allocating API now looks more consistent.

Reviewed-by: Liu Bo <bo.li.liu@oracle.com>
Signed-off-by: David Sterba <dsterba@suse.com>
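For context, a minimal sketch of the definition side of the change. It lives in fs/btrfs/extent_io.c and is therefore outside this scrub.c-limited diff; the wrapper body over bio_alloc_bioset() and the btrfs_bioset bio set is an assumption based on the helper's shape in kernels of that era, not part of this patch:

	/* Before: every caller had to pass a gfp mask. */
	struct bio *btrfs_io_bio_alloc(gfp_t gfp_flags, unsigned int nr_iovecs)
	{
		return bio_alloc_bioset(gfp_flags, nr_iovecs, btrfs_bioset);
	}

	/* After: the gfp parameter is sunk into the helper as GFP_NOFS. */
	struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
	{
		return bio_alloc_bioset(GFP_NOFS, nr_iovecs, btrfs_bioset);
	}

Because the bio comes from a bio set backed by a mempool and GFP_NOFS permits direct reclaim, bio_alloc_bioset() will not return NULL here, which is why none of the call sites below check the result.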
Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r--	fs/btrfs/scrub.c	16
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 1e2dfea00b2f..58a249cd5adc 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1737,7 +1737,7 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
 		}
 
 		WARN_ON(!page->page);
-		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
+		bio = btrfs_io_bio_alloc(1);
 		bio->bi_bdev = page->dev->bdev;
 
 		bio_add_page(bio, page->page, PAGE_SIZE, 0);
@@ -1825,7 +1825,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 		return -EIO;
 	}
 
-	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
+	bio = btrfs_io_bio_alloc(1);
 	bio->bi_bdev = page_bad->dev->bdev;
 	bio->bi_iter.bi_sector = page_bad->physical >> 9;
 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -1915,8 +1915,7 @@ again:
 		sbio->dev = sctx->wr_tgtdev;
 		bio = sbio->bio;
 		if (!bio) {
-			bio = btrfs_io_bio_alloc(GFP_KERNEL,
-						 sctx->pages_per_wr_bio);
+			bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio);
 			sbio->bio = bio;
 		}
 
@@ -2316,8 +2315,7 @@ again:
 		sbio->dev = spage->dev;
 		bio = sbio->bio;
 		if (!bio) {
-			bio = btrfs_io_bio_alloc(GFP_KERNEL,
-						 sctx->pages_per_rd_bio);
+			bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio);
 			sbio->bio = bio;
 		}
 
@@ -2443,7 +2441,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
 		goto bbio_out;
 	}
 
-	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
+	bio = btrfs_io_bio_alloc(0);
 	bio->bi_iter.bi_sector = logical >> 9;
 	bio->bi_private = sblock;
 	bio->bi_end_io = scrub_missing_raid56_end_io;
@@ -3019,7 +3017,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
 	if (ret || !bbio || !bbio->raid_map)
 		goto bbio_out;
 
-	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
+	bio = btrfs_io_bio_alloc(0);
 	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
 	bio->bi_private = sparity;
 	bio->bi_end_io = scrub_parity_bio_endio;
@@ -4626,7 +4624,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
 			"scrub write_page_nocow(bdev == NULL) is unexpected");
 		return -EIO;
 	}
-	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
+	bio = btrfs_io_bio_alloc(1);
 	bio->bi_iter.bi_size = 0;
 	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
 	bio->bi_bdev = dev->bdev;