path: root/fs/btrfs/scrub.c
author    Qu Wenruo <wqu@suse.com>	2023-02-17 08:37:03 +0300
committer David Sterba <dsterba@suse.com>	2023-04-17 19:01:14 +0300
commit    18d758a2d81a97b9a54a37d535870ce3170cc208 (patch)
tree      19b31f1bd0b4917f8648a5cbfd96b4780056a89a /fs/btrfs/scrub.c
parent    1faf3885067d5be65597d5dc682f0da505822104 (diff)
download  linux-18d758a2d81a97b9a54a37d535870ce3170cc208.tar.xz
btrfs: replace btrfs_io_context::raid_map with a fixed u64 value
In the btrfs_io_context structure we have a pointer, raid_map, which records the logical bytenr of each stripe. But since we always call sort_parity_stripes(), the resulting raid_map[] is always sorted, so raid_map[0] is always the logical bytenr of the full stripe. Why waste the space, and the time spent sorting, on raid_map?

This patch replaces btrfs_io_context::raid_map with a single u64, full_stripe_start, by:

- Replacing btrfs_io_context::raid_map with full_stripe_start

- Converting call sites that used raid_map[0] to use full_stripe_start

- Converting call sites that checked raid_map[i] to instead compare the stripe index against nr_data_stripes

The benefits are:

- Less memory wasted on raid_map
  It's sizeof(u64) * num_stripes vs sizeof(u64); this always saves at least one u64, and the saving grows with num_stripes.

- No more weird alloc_btrfs_io_context() behavior
  There is now only one fixed-size part plus one variable-length array.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
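Below is a minimal user-space sketch of the lookup the patch switches to, for illustration only. It mirrors the new scrub_stripe_index_and_offset() logic for RAID5/6 but is not the kernel code: STRIPE_LEN and STRIPE_LEN_MASK are local stand-ins for BTRFS_STRIPE_LEN and BTRFS_STRIPE_LEN_MASK (assumed to be 64 KiB here), and stripe_index_and_offset() is a hypothetical helper.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins; the kernel's BTRFS_STRIPE_LEN is assumed to be 64 KiB. */
#define STRIPE_LEN      (64 * 1024ULL)
#define STRIPE_LEN_MASK (STRIPE_LEN - 1)

/*
 * Given the logical bytenr of the full stripe (what raid_map[0] used to
 * hold), find which data stripe a logical address falls into and its
 * offset within that stripe.  nr_data_stripes excludes the P (and Q)
 * stripes, i.e. the entries the old loop skipped via the
 * RAID5_P_STRIPE/RAID6_Q_STRIPE markers.
 */
static void stripe_index_and_offset(uint64_t logical, uint64_t full_stripe_start,
                                    int nr_data_stripes,
                                    int *stripe_index, uint64_t *stripe_offset)
{
        int i;

        for (i = 0; i < nr_data_stripes; i++) {
                uint64_t data_stripe_start = full_stripe_start + i * STRIPE_LEN;

                if (logical >= data_stripe_start &&
                    logical < data_stripe_start + STRIPE_LEN)
                        break;
        }
        *stripe_index = i;
        /* STRIPE_LEN is a power of two, so masking drops the i * STRIPE_LEN part. */
        *stripe_offset = (logical - full_stripe_start) & STRIPE_LEN_MASK;
}

int main(void)
{
        int index;
        uint64_t offset;

        /* Full stripe at 1 MiB, logical 96 KiB into it: data stripe 1, offset 32 KiB. */
        stripe_index_and_offset(1048576 + 98304, 1048576, 3, &index, &offset);
        printf("stripe %d, offset %llu\n", index, (unsigned long long)offset);
        return 0;
}

The offset computation is the same trick the patch uses: because every data stripe is STRIPE_LEN long and they sit back to back from the full stripe start, (logical - full_stripe_start) & STRIPE_LEN_MASK equals logical minus the start of the containing data stripe, so no per-stripe raid_map entry is needed.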
Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r--  fs/btrfs/scrub.c  25
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 64b52be6bf0b..91aeac36ebc9 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1430,7 +1430,7 @@ static inline int scrub_nr_raid_mirrors(struct btrfs_io_context *bioc)
}
static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
- u64 *raid_map,
+ u64 full_stripe_logical,
int nstripes, int mirror,
int *stripe_index,
u64 *stripe_offset)
@@ -1438,19 +1438,22 @@ static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
int i;
if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
+ const int nr_data_stripes = (map_type & BTRFS_BLOCK_GROUP_RAID5) ?
+ nstripes - 1 : nstripes - 2;
+
/* RAID5/6 */
- for (i = 0; i < nstripes; i++) {
- if (raid_map[i] == RAID6_Q_STRIPE ||
- raid_map[i] == RAID5_P_STRIPE)
- continue;
+ for (i = 0; i < nr_data_stripes; i++) {
+ const u64 data_stripe_start = full_stripe_logical +
+ (i * BTRFS_STRIPE_LEN);
- if (logical >= raid_map[i] &&
- logical < raid_map[i] + BTRFS_STRIPE_LEN)
+ if (logical >= data_stripe_start &&
+ logical < data_stripe_start + BTRFS_STRIPE_LEN)
break;
}
*stripe_index = i;
- *stripe_offset = logical - raid_map[i];
+ *stripe_offset = (logical - full_stripe_logical) &
+ BTRFS_STRIPE_LEN_MASK;
} else {
/* The other RAID type */
*stripe_index = mirror;
@@ -1538,7 +1541,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
scrub_stripe_index_and_offset(logical,
bioc->map_type,
- bioc->raid_map,
+ bioc->full_stripe_logical,
bioc->num_stripes -
bioc->replace_nr_stripes,
mirror_index,
@@ -2398,7 +2401,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
btrfs_bio_counter_inc_blocked(fs_info);
ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
&length, &bioc);
- if (ret || !bioc || !bioc->raid_map)
+ if (ret || !bioc)
goto bioc_out;
if (WARN_ON(!sctx->is_dev_replace ||
@@ -3007,7 +3010,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
btrfs_bio_counter_inc_blocked(fs_info);
ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
&length, &bioc);
- if (ret || !bioc || !bioc->raid_map)
+ if (ret || !bioc)
goto bioc_out;
bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);