author     David Sterba <dsterba@suse.com>  2020-07-08 23:55:14 +0300
committer  David Sterba <dsterba@suse.com>  2020-07-27 13:55:43 +0300
commit     d85327b1d8b74ec168ed34c798a424de82fdc001 (patch)
tree       02578885884b3ad423e47ec7f012d3975a4d8de5 /fs/btrfs
parent     49bac897683340457a0ab1f76b924a1220bdb604 (diff)
download   linux-d85327b1d8b74ec168ed34c798a424de82fdc001.tar.xz
btrfs: prefetch chunk tree leaves at mount
The whole chunk tree is read at mount time, so we can utilize readahead to get the tree blocks into memory before we read the items. The idea is from Robbie, but instead of updating the search slot readahead, this patch implements the chunk tree readahead manually from the nodes on level 1.

We've decided to do specific readahead optimizations first and unify them under a common API later, so that we don't break everything by changing the search slot readahead logic.

Chunk trees grow taller on large filesystems (many terabytes), and prefetching just level 1 seems to be sufficient. The provided example was from a 200TiB filesystem with a chunk tree of level 2.

CC: Robbie Ko <robbieko@synology.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
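For orientation, a condensed sketch of the change (paraphrasing the hunks below; the item-parsing body of the loop and error handling are omitted, and all helpers are the ones the patch itself uses): a new helper issues readahead for every child block of a level 1 node, and btrfs_read_chunk_tree() triggers it the first time each level 1 node is seen during the leaf walk.

    /* Issue readahead for every child block (leaf) of a level 1 node. */
    static void readahead_tree_node_children(struct extent_buffer *node)
    {
            const int nr_items = btrfs_header_nritems(node);
            int i;

            for (i = 0; i < nr_items; i++)
                    readahead_tree_block(node->fs_info,
                                         btrfs_node_blockptr(node, i));
    }

    /*
     * In the leaf walk of btrfs_read_chunk_tree(): path->nodes[1] is the
     * level 1 parent of the current leaf. Prefetch its children the first
     * time we see it and remember its start in last_ra_node so readahead
     * is issued only once per node.
     */
    node = path->nodes[1];
    if (node && last_ra_node != node->start) {
            readahead_tree_node_children(node);
            last_ra_node = node->start;
    }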
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/volumes.c | 27
1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 19213347ec70..c1a4dab8e811 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -7013,6 +7013,19 @@ out:
 	return ret;
 }
 
+static void readahead_tree_node_children(struct extent_buffer *node)
+{
+	int i;
+	const int nr_items = btrfs_header_nritems(node);
+
+	for (i = 0; i < nr_items; i++) {
+		u64 start;
+
+		start = btrfs_node_blockptr(node, i);
+		readahead_tree_block(node->fs_info, start);
+	}
+}
+
 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_root *root = fs_info->chunk_root;
@@ -7023,6 +7036,7 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
 	int ret;
 	int slot;
 	u64 total_dev = 0;
+	u64 last_ra_node = 0;
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -7056,6 +7070,8 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
 	if (ret < 0)
 		goto error;
 	while (1) {
+		struct extent_buffer *node;
+
 		leaf = path->nodes[0];
 		slot = path->slots[0];
 		if (slot >= btrfs_header_nritems(leaf)) {
@@ -7066,6 +7082,17 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
 				goto error;
 			break;
 		}
+		/*
+		 * The nodes on level 1 are not locked but we don't need to do
+		 * that during mount time as nothing else can access the tree
+		 */
+		node = path->nodes[1];
+		if (node) {
+			if (last_ra_node != node->start) {
+				readahead_tree_node_children(node);
+				last_ra_node = node->start;
+			}
+		}
 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
 		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
 			struct btrfs_dev_item *dev_item;