author		Qu Wenruo <wqu@suse.com>	2023-03-20 05:12:49 +0300
committer	David Sterba <dsterba@suse.com>	2023-04-17 19:01:23 +0300
commit		4886ff7b50f6341539611a1503a6ad2e55b77c98 (patch)
tree		0309674a8e0fef93be3b94b2ba7414400b4b9463 /fs/btrfs/volumes.c
parent		4317ff0056bedfc472202bf4ccf72d51094d6ade (diff)
btrfs: introduce a new helper to submit write bio for repair
Both scrub and read-repair utilize a special kind of repair write that:

- Only writes back to a single device
  Even for read-repair on RAID56, we only update the corrupted data
  stripe itself, without triggering the full RMW path.

- Requires a valid @mirror_num
  For the RAID56 case, only @mirror_num == 1 is valid.
  For non-RAID56 cases, we need @mirror_num to locate our stripe.

- Needs no data csum generation

The two call sites still have some differences though:

- Read-repair uses a plain bio
  It doesn't need a full btrfs_bio, and submits through
  submit_bio_wait().

- The new scrub repair will use btrfs_bio
  To simplify both the read and write paths.

So this patch:

- Introduces a common helper, btrfs_map_repair_block()
  Due to the single-device nature, we can use an on-stack
  btrfs_io_stripe to pass the device and its physical bytenr.

- Introduces a new interface, btrfs_submit_repair_bio(), for the
  incoming scrub code.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
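For illustration only (this sketch is not part of the commit): a
read-repair style caller could use the new helper roughly as below.
The function repair_one_block() and all of its parameters are
hypothetical; btrfs_map_repair_block(), btrfs_bio_counter_inc_blocked()
and submit_bio_wait() are the interfaces named in the message above.

/*
 * Hypothetical caller sketch: write one repaired block back to the
 * single device holding @mirror_num, following the rules documented
 * above btrfs_map_repair_block().
 */
static int repair_one_block(struct btrfs_fs_info *fs_info, u64 logical,
			    u32 length, int mirror_num, struct page *page,
			    unsigned int pgoff)
{
	struct btrfs_io_stripe smap = { 0 };
	struct bio_vec bvec;
	struct bio bio;
	int ret;

	/* Block device removal while the repair write is in flight. */
	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_repair_block(fs_info, &smap, logical, length,
				     mirror_num);
	if (ret < 0)
		goto out;

	/* A plain bio is enough, repair writes generate no data csum. */
	bio_init(&bio, smap.dev->bdev, &bvec, 1, REQ_OP_WRITE | REQ_SYNC);
	bio.bi_iter.bi_sector = smap.physical >> SECTOR_SHIFT;
	__bio_add_page(&bio, page, length, pgoff);
	ret = submit_bio_wait(&bio);
	bio_uninit(&bio);
out:
	btrfs_bio_counter_dec(fs_info);
	return ret;
}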
Diffstat (limited to 'fs/btrfs/volumes.c')
-rw-r--r--	fs/btrfs/volumes.c	73
1 file changed, 73 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c201d72f798e..db6e15205be5 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -8019,3 +8019,76 @@ bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
return true;
}
+
+static void map_raid56_repair_block(struct btrfs_io_context *bioc,
+ struct btrfs_io_stripe *smap,
+ u64 logical)
+{
+ int data_stripes = nr_bioc_data_stripes(bioc);
+ int i;
+
+ for (i = 0; i < data_stripes; i++) {
+ u64 stripe_start = bioc->full_stripe_logical +
+ (i << BTRFS_STRIPE_LEN_SHIFT);
+
+ if (logical >= stripe_start &&
+ logical < stripe_start + BTRFS_STRIPE_LEN)
+ break;
+ }
+ ASSERT(i < data_stripes);
+ smap->dev = bioc->stripes[i].dev;
+ smap->physical = bioc->stripes[i].physical +
+ ((logical - bioc->full_stripe_logical) &
+ BTRFS_STRIPE_LEN_MASK);
+}
+
+/*
+ * Map a repair write into a single device.
+ *
+ * A repair write is triggered by read time repair or scrub, and only
+ * updates the contents of a single device, without updating any other
+ * mirrors nor going through the RMW path.
+ *
+ * Callers should ensure:
+ *
+ * - Call btrfs_bio_counter_inc_blocked() first
+ * - The range does not cross a stripe boundary
+ * - A valid @mirror_num is passed in
+ */
+int btrfs_map_repair_block(struct btrfs_fs_info *fs_info,
+ struct btrfs_io_stripe *smap, u64 logical,
+ u32 length, int mirror_num)
+{
+ struct btrfs_io_context *bioc = NULL;
+ u64 map_length = length;
+ int mirror_ret = mirror_num;
+ int ret;
+
+ ASSERT(mirror_num > 0);
+
+ ret = __btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &map_length,
+ &bioc, smap, &mirror_ret, true);
+ if (ret < 0)
+ return ret;
+
+	/* The map range should not cross a stripe boundary. */
+ ASSERT(map_length >= length);
+
+	/* Already mapped to a single stripe. */
+ if (!bioc)
+ goto out;
+
+ /* Map the RAID56 multi-stripe writes to a single one. */
+ if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
+ map_raid56_repair_block(bioc, smap, logical);
+ goto out;
+ }
+
+ ASSERT(mirror_num <= bioc->num_stripes);
+ smap->dev = bioc->stripes[mirror_num - 1].dev;
+ smap->physical = bioc->stripes[mirror_num - 1].physical;
+out:
+ btrfs_put_bioc(bioc);
+ ASSERT(smap->dev);
+ return 0;
+}
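
As a worked example of the RAID56 branch above: the following
standalone program reproduces the arithmetic of
map_raid56_repair_block() with the kernel's 64KiB stripe length. The
3-data-stripe layout and all addresses are made-up values for
illustration.

#include <stdio.h>
#include <stdint.h>

/* Same values as the kernel: one btrfs stripe is 64KiB. */
#define STRIPE_LEN_SHIFT	16
#define STRIPE_LEN		(1ULL << STRIPE_LEN_SHIFT)
#define STRIPE_LEN_MASK		(STRIPE_LEN - 1)

int main(void)
{
	/* Hypothetical 3-data-stripe RAID5 full stripe. */
	uint64_t full_stripe_logical = 0x100000;
	uint64_t stripe_physical[3] = { 0x400000, 0x800000, 0xc00000 };
	int data_stripes = 3;
	/* A corrupted block 160KiB into the full stripe. */
	uint64_t logical = full_stripe_logical + 160 * 1024;
	int i;

	/* The same linear search as map_raid56_repair_block(). */
	for (i = 0; i < data_stripes; i++) {
		uint64_t stripe_start = full_stripe_logical +
					((uint64_t)i << STRIPE_LEN_SHIFT);

		if (logical >= stripe_start &&
		    logical < stripe_start + STRIPE_LEN)
			break;
	}

	/* 160KiB / 64KiB -> data stripe 2, offset 32KiB within it. */
	printf("data stripe index: %d\n", i);
	printf("physical bytenr:   0x%llx\n",
	       (unsigned long long)(stripe_physical[i] +
			((logical - full_stripe_logical) & STRIPE_LEN_MASK)));
	return 0;
}

This prints stripe index 2 and physical bytenr 0xc08000. For
non-RAID56 profiles no search is needed: with e.g. two mirrors,
@mirror_num == 2 directly selects bioc->stripes[1].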