path: root/fs/btrfs/send.c
author     Filipe Manana <fdmanana@suse.com>    2023-01-11 14:36:13 +0300
committer  David Sterba <dsterba@suse.com>      2023-02-13 19:50:35 +0300
commit     90b90d4ac03cbaaa87a81ee04e9cdd9b4225cdd4
tree       8bd9c791e0de5b4100b2a2dac3a1845676853eaf /fs/btrfs/send.c
parent     d307d2f35ca5c57ef2966d2a7ccb2de1118f61f4
btrfs: send: genericize the backref cache to allow it to be reused
The backref cache is a cache backed by a maple tree and a linked list to keep track of temporal access to cached entries (the LRU entry always at the head of the list). This type of caching method is going to be useful in other scenarios, so make the cache implementation more generic and move it into its own header and source files.

This patch is part of a larger patchset and the changelog of the last patch in the series contains a sample performance test and results. The patches that comprise the patchset are the following:

  btrfs: send: directly return from did_overwrite_ref() and simplify it
  btrfs: send: avoid unnecessary generation search at did_overwrite_ref()
  btrfs: send: directly return from will_overwrite_ref() and simplify it
  btrfs: send: avoid extra b+tree searches when checking reference overrides
  btrfs: send: remove send_progress argument from can_rmdir()
  btrfs: send: avoid duplicated orphan dir allocation and initialization
  btrfs: send: avoid unnecessary orphan dir rbtree search at can_rmdir()
  btrfs: send: reduce searches on parent root when checking if dir can be removed
  btrfs: send: iterate waiting dir move rbtree only once when processing refs
  btrfs: send: initialize all the red black trees earlier
  btrfs: send: genericize the backref cache to allow it to be reused
  btrfs: adapt lru cache to allow for 64 bits keys on 32 bits systems
  btrfs: send: cache information about created directories
  btrfs: allow a generation number to be associated with lru cache entries
  btrfs: add an api to delete a specific entry from the lru cache
  btrfs: send: use the lru cache to implement the name cache
  btrfs: send: update size of roots array for backref cache entries
  btrfs: send: cache utimes operations for directories if possible

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
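The trick that makes the cache generic is visible in the diff below: a user embeds a struct btrfs_lru_cache_entry as the first member of its own entry type and recovers the containing structure with container_of(), as lookup_backref_cache() does after this patch. Here is a minimal, self-contained userspace sketch of that pattern; the simplified lru_cache_entry layout and the MAX_ROOTS constant standing in for SEND_MAX_BACKREF_CACHE_ROOTS are assumptions for illustration, not the kernel definitions.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-in for struct btrfs_lru_cache_entry (layout assumed). */
struct lru_cache_entry {
	uint64_t key;
};

/* Placeholder constant; the real SEND_MAX_BACKREF_CACHE_ROOTS differs. */
enum { MAX_ROOTS = 4 };

/*
 * The user-specific entry embeds the generic entry as its FIRST member,
 * so a pointer to the embedded entry is also a pointer to the whole
 * structure; the static_assert added by the patch enforces exactly this.
 */
struct backref_cache_entry {
	struct lru_cache_entry entry;
	uint64_t root_ids[MAX_ROOTS];
	int num_roots;
};

static_assert(offsetof(struct backref_cache_entry, entry) == 0,
	      "generic entry must be the first member");

/* Userspace equivalent of the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct backref_cache_entry e = {
		.entry = { .key = 42 },
		.num_roots = 1,
	};
	/* A generic lookup hands back only the embedded entry... */
	struct lru_cache_entry *raw = &e.entry;
	/* ...and the caller recovers its own type from it. */
	struct backref_cache_entry *back =
		container_of(raw, struct backref_cache_entry, entry);

	assert(back == &e);
	return 0;
}

Because the embedded entry sits at offset zero, container_of() degenerates to a plain cast, which is why the generic cache can manage caller-defined entry types without any per-user callbacks.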
Diffstat (limited to 'fs/btrfs/send.c')
-rw-r--r--  fs/btrfs/send.c | 80
1 file changed, 24 insertions(+), 56 deletions(-)
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 44fc1facbd1f..9c5a2c96e544 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -32,6 +32,7 @@
#include "file-item.h"
#include "ioctl.h"
#include "verity.h"
+#include "lru_cache.h"
/*
* Maximum number of references an extent can have in order for us to attempt to
@@ -107,15 +108,15 @@ struct clone_root {
* x86_64).
*/
struct backref_cache_entry {
- /* List to link to the cache's lru list. */
- struct list_head list;
- /* The key for this entry in the cache. */
- u64 key;
+ struct btrfs_lru_cache_entry entry;
u64 root_ids[SEND_MAX_BACKREF_CACHE_ROOTS];
/* Number of valid elements in the root_ids array. */
int num_roots;
};
+/* See the comment at lru_cache.h about struct btrfs_lru_cache_entry. */
+static_assert(offsetof(struct backref_cache_entry, entry) == 0);
+
struct send_ctx {
struct file *send_filp;
loff_t send_off;
@@ -285,13 +286,8 @@ struct send_ctx {
struct rb_root rbtree_new_refs;
struct rb_root rbtree_deleted_refs;
- struct {
- u64 last_reloc_trans;
- struct list_head lru_list;
- struct maple_tree entries;
- /* Number of entries stored in the cache. */
- int size;
- } backref_cache;
+ struct btrfs_lru_cache backref_cache;
+ u64 backref_cache_last_reloc_trans;
};
struct pending_dir_move {
@@ -1387,19 +1383,6 @@ static int iterate_backrefs(u64 ino, u64 offset, u64 num_bytes, u64 root_id,
return 0;
}
-static void empty_backref_cache(struct send_ctx *sctx)
-{
- struct backref_cache_entry *entry;
- struct backref_cache_entry *tmp;
-
- list_for_each_entry_safe(entry, tmp, &sctx->backref_cache.lru_list, list)
- kfree(entry);
-
- INIT_LIST_HEAD(&sctx->backref_cache.lru_list);
- mtree_destroy(&sctx->backref_cache.entries);
- sctx->backref_cache.size = 0;
-}
-
static bool lookup_backref_cache(u64 leaf_bytenr, void *ctx,
const u64 **root_ids_ret, int *root_count_ret)
{
@@ -1407,9 +1390,10 @@ static bool lookup_backref_cache(u64 leaf_bytenr, void *ctx,
struct send_ctx *sctx = bctx->sctx;
struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
const u64 key = leaf_bytenr >> fs_info->sectorsize_bits;
+ struct btrfs_lru_cache_entry *raw_entry;
struct backref_cache_entry *entry;
- if (sctx->backref_cache.size == 0)
+ if (btrfs_lru_cache_size(&sctx->backref_cache) == 0)
return false;
/*
@@ -1423,18 +1407,18 @@ static bool lookup_backref_cache(u64 leaf_bytenr, void *ctx,
* transaction handle or holding fs_info->commit_root_sem, so no need
* to take any lock here.
*/
- if (fs_info->last_reloc_trans > sctx->backref_cache.last_reloc_trans) {
- empty_backref_cache(sctx);
+ if (fs_info->last_reloc_trans > sctx->backref_cache_last_reloc_trans) {
+ btrfs_lru_cache_clear(&sctx->backref_cache);
return false;
}
- entry = mtree_load(&sctx->backref_cache.entries, key);
- if (!entry)
+ raw_entry = btrfs_lru_cache_lookup(&sctx->backref_cache, key);
+ if (!raw_entry)
return false;
+ entry = container_of(raw_entry, struct backref_cache_entry, entry);
*root_ids_ret = entry->root_ids;
*root_count_ret = entry->num_roots;
- list_move_tail(&entry->list, &sctx->backref_cache.lru_list);
return true;
}
@@ -1460,7 +1444,7 @@ static void store_backref_cache(u64 leaf_bytenr, const struct ulist *root_ids,
if (!new_entry)
return;
- new_entry->key = leaf_bytenr >> fs_info->sectorsize_bits;
+ new_entry->entry.key = leaf_bytenr >> fs_info->sectorsize_bits;
new_entry->num_roots = 0;
ULIST_ITER_INIT(&uiter);
while ((node = ulist_next(root_ids, &uiter)) != NULL) {
@@ -1488,23 +1472,12 @@ static void store_backref_cache(u64 leaf_bytenr, const struct ulist *root_ids,
* none of the roots is part of the list of roots from which we are
* allowed to clone. Cache the new entry as it's still useful to avoid
* backref walking to determine which roots have a path to the leaf.
+ *
+ * Also use GFP_NOFS because we're called while holding a transaction
+ * handle or while holding fs_info->commit_root_sem.
*/
-
- if (sctx->backref_cache.size >= SEND_MAX_BACKREF_CACHE_SIZE) {
- struct backref_cache_entry *lru_entry;
- struct backref_cache_entry *mt_entry;
-
- lru_entry = list_first_entry(&sctx->backref_cache.lru_list,
- struct backref_cache_entry, list);
- mt_entry = mtree_erase(&sctx->backref_cache.entries, lru_entry->key);
- ASSERT(mt_entry == lru_entry);
- list_del(&mt_entry->list);
- kfree(mt_entry);
- sctx->backref_cache.size--;
- }
-
- ret = mtree_insert(&sctx->backref_cache.entries, new_entry->key,
- new_entry, GFP_NOFS);
+ ret = btrfs_lru_cache_store(&sctx->backref_cache, &new_entry->entry,
+ GFP_NOFS);
ASSERT(ret == 0 || ret == -ENOMEM);
if (ret) {
/* Caching is optional, no worries. */
@@ -1512,17 +1485,13 @@ static void store_backref_cache(u64 leaf_bytenr, const struct ulist *root_ids,
return;
}
- list_add_tail(&new_entry->list, &sctx->backref_cache.lru_list);
-
/*
* We are called from iterate_extent_inodes() while either holding a
* transaction handle or holding fs_info->commit_root_sem, so no need
* to take any lock here.
*/
- if (sctx->backref_cache.size == 0)
- sctx->backref_cache.last_reloc_trans = fs_info->last_reloc_trans;
-
- sctx->backref_cache.size++;
+ if (btrfs_lru_cache_size(&sctx->backref_cache) == 1)
+ sctx->backref_cache_last_reloc_trans = fs_info->last_reloc_trans;
}
static int check_extent_item(u64 bytenr, const struct btrfs_extent_item *ei,
@@ -8139,8 +8108,7 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
INIT_LIST_HEAD(&sctx->name_cache_list);
- INIT_LIST_HEAD(&sctx->backref_cache.lru_list);
- mt_init(&sctx->backref_cache.entries);
+ btrfs_lru_cache_init(&sctx->backref_cache, SEND_MAX_BACKREF_CACHE_SIZE);
sctx->pending_dir_moves = RB_ROOT;
sctx->waiting_dir_moves = RB_ROOT;
@@ -8404,7 +8372,7 @@ out:
close_current_inode(sctx);
- empty_backref_cache(sctx);
+ btrfs_lru_cache_clear(&sctx->backref_cache);
kfree(sctx);
}