path: root/fs/btrfs/lru_cache.c
author	Filipe Manana <fdmanana@suse.com>	2023-01-11 14:36:14 +0300
committer	David Sterba <dsterba@suse.com>	2023-02-13 19:50:36 +0300
commit	6273ee621f3d5829667943c1f53d8115752b45aa (patch)
tree	102d42506b21748385f8f64eaa25e92b399d7567 /fs/btrfs/lru_cache.c
parent	90b90d4ac03cbaaa87a81ee04e9cdd9b4225cdd4 (diff)
download	linux-6273ee621f3d5829667943c1f53d8115752b45aa.tar.xz
btrfs: adapt lru cache to allow for 64 bits keys on 32 bits systems
The lru cache is backed by a maple tree, which uses the unsigned long type for keys, and that type has a width of 32 bits on 32 bits systems and a width of 64 bits on 64 bits systems.

Currently there is only one user of the lru cache, the send backref cache, which uses a sector number as a key, a logical address right shifted by fs_info->sectorsize_bits, so a 32 bits width is not yet a problem (the same happens with the radix tree we use to track extent buffers, fs_info->buffer_radix).

However the next patches in the series will start using the lru cache for cases where inode numbers are the keys, and the inode numbers are always 64 bits, even if we are running on a 32 bits system.

So adapt the lru cache to allow multiple values under the same key, by having the maple tree store a head entry that points to a list of entries instead of pointing to a single entry. This is a similar approach to what we currently do for the name cache in send (which uses a radix tree that has indexes with an unsigned long type as well), and will allow later to use the lru cache for the send name cache as well.

This patch is part of a larger patchset and the changelog of the last patch in the series contains a sample performance test and results. The patches that comprise the patchset are the following:

  btrfs: send: directly return from did_overwrite_ref() and simplify it
  btrfs: send: avoid unnecessary generation search at did_overwrite_ref()
  btrfs: send: directly return from will_overwrite_ref() and simplify it
  btrfs: send: avoid extra b+tree searches when checking reference overrides
  btrfs: send: remove send_progress argument from can_rmdir()
  btrfs: send: avoid duplicated orphan dir allocation and initialization
  btrfs: send: avoid unnecessary orphan dir rbtree search at can_rmdir()
  btrfs: send: reduce searches on parent root when checking if dir can be removed
  btrfs: send: iterate waiting dir move rbtree only once when processing refs
  btrfs: send: initialize all the red black trees earlier
  btrfs: send: genericize the backref cache to allow it to be reused
  btrfs: adapt lru cache to allow for 64 bits keys on 32 bits systems
  btrfs: send: cache information about created directories
  btrfs: allow a generation number to be associated with lru cache entries
  btrfs: add an api to delete a specific entry from the lru cache
  btrfs: send: use the lru cache to implement the name cache
  btrfs: send: update size of roots array for backref cache entries
  btrfs: send: cache utimes operations for directories if possible

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
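To make the key-width issue concrete, below is a minimal userspace sketch (not kernel code) of the head-entry scheme described above: the index type is unsigned long, so on a 32 bits system two distinct 64 bits keys can truncate to the same index, and entries therefore keep their full u64 key on a per-index list that lookups walk to disambiguate. All names in the sketch are hypothetical; only the general approach mirrors the patch.

	#include <stdint.h>
	#include <stdio.h>

	/* An entry keeps its full 64 bits key; entries whose keys truncate
	 * to the same unsigned long index are chained on one list. */
	struct entry {
		uint64_t key;
		struct entry *next;
	};

	/* Stand-in for the maple tree slot: one head per truncated index. */
	struct bucket {
		unsigned long index;
		struct entry *head;
	};

	/* Walk the bucket's list and match on the full 64 bits key, the
	 * same way the patch's match_entry() walks a head entry's list. */
	static struct entry *match_entry(struct bucket *b, uint64_t key)
	{
		struct entry *e;

		for (e = b->head; e; e = e->next) {
			if (e->key == key)
				return e;
		}
		return NULL;
	}

	int main(void)
	{
		/* On a 32 bits system both keys truncate to index 1, so they
		 * land in the same bucket, as modeled here. */
		uint64_t k1 = 0x100000001ULL;
		uint64_t k2 = 0x200000001ULL;
		struct entry e1 = { k1, NULL };
		struct entry e2 = { k2, &e1 };
		struct bucket b = { (unsigned long)k1, &e2 };

		printf("unsigned long: %zu bits\n", sizeof(unsigned long) * 8);
		printf("k1 found: %d, k2 found: %d\n",
		       match_entry(&b, k1) != NULL,
		       match_entry(&b, k2) != NULL);
		return 0;
	}

Both lookups succeed even though the two keys share one index, which is exactly what the head-entry-plus-list layout buys on 32 bits systems.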
Diffstat (limited to 'fs/btrfs/lru_cache.c')
-rw-r--r--	fs/btrfs/lru_cache.c	87
1 file changed, 72 insertions(+), 15 deletions(-)
diff --git a/fs/btrfs/lru_cache.c b/fs/btrfs/lru_cache.c
index 177e7e705363..6012bceedffc 100644
--- a/fs/btrfs/lru_cache.c
+++ b/fs/btrfs/lru_cache.c
@@ -18,6 +18,18 @@ void btrfs_lru_cache_init(struct btrfs_lru_cache *cache, unsigned int max_size)
cache->max_size = max_size;
}
+static struct btrfs_lru_cache_entry *match_entry(struct list_head *head, u64 key)
+{
+ struct btrfs_lru_cache_entry *entry;
+
+ list_for_each_entry(entry, head, list) {
+ if (entry->key == key)
+ return entry;
+ }
+
+ return NULL;
+}
+
/*
* Lookup for an entry in the cache.
*
@@ -29,15 +41,48 @@ void btrfs_lru_cache_init(struct btrfs_lru_cache *cache, unsigned int max_size)
struct btrfs_lru_cache_entry *btrfs_lru_cache_lookup(struct btrfs_lru_cache *cache,
u64 key)
{
+ struct list_head *head;
struct btrfs_lru_cache_entry *entry;
- entry = mtree_load(&cache->entries, key);
+ head = mtree_load(&cache->entries, key);
+ if (!head)
+ return NULL;
+
+ entry = match_entry(head, key);
if (entry)
list_move_tail(&entry->lru_list, &cache->lru_list);
return entry;
}
+static void delete_entry(struct btrfs_lru_cache *cache,
+ struct btrfs_lru_cache_entry *entry)
+{
+ struct list_head *prev = entry->list.prev;
+
+ ASSERT(cache->size > 0);
+ ASSERT(!mtree_empty(&cache->entries));
+
+ list_del(&entry->list);
+ list_del(&entry->lru_list);
+
+ if (list_empty(prev)) {
+ struct list_head *head;
+
+ /*
+ * If previous element in the list entry->list is now empty, it
+ * means it's a head entry not pointing to any cached entries,
+ * so remove it from the maple tree and free it.
+ */
+ head = mtree_erase(&cache->entries, entry->key);
+ ASSERT(head == prev);
+ kfree(head);
+ }
+
+ kfree(entry);
+ cache->size--;
+}
+
/*
* Store an entry in the cache.
*
@@ -50,26 +95,39 @@ int btrfs_lru_cache_store(struct btrfs_lru_cache *cache,
struct btrfs_lru_cache_entry *new_entry,
gfp_t gfp)
{
+ const u64 key = new_entry->key;
+ struct list_head *head;
int ret;
+ head = kmalloc(sizeof(*head), gfp);
+ if (!head)
+ return -ENOMEM;
+
+ ret = mtree_insert(&cache->entries, key, head, gfp);
+ if (ret == 0) {
+ INIT_LIST_HEAD(head);
+ list_add_tail(&new_entry->list, head);
+ } else if (ret == -EEXIST) {
+ kfree(head);
+ head = mtree_load(&cache->entries, key);
+ ASSERT(head != NULL);
+ if (match_entry(head, key) != NULL)
+ return -EEXIST;
+ list_add_tail(&new_entry->list, head);
+ } else if (ret < 0) {
+ kfree(head);
+ return ret;
+ }
+
if (cache->size == cache->max_size) {
struct btrfs_lru_cache_entry *lru_entry;
- struct btrfs_lru_cache_entry *mt_entry;
lru_entry = list_first_entry(&cache->lru_list,
struct btrfs_lru_cache_entry,
lru_list);
- mt_entry = mtree_erase(&cache->entries, lru_entry->key);
- ASSERT(mt_entry == lru_entry);
- list_del(&mt_entry->lru_list);
- kfree(mt_entry);
- cache->size--;
+ delete_entry(cache, lru_entry);
}
- ret = mtree_insert(&cache->entries, new_entry->key, new_entry, gfp);
- if (ret < 0)
- return ret;
-
list_add_tail(&new_entry->lru_list, &cache->lru_list);
cache->size++;
@@ -89,9 +147,8 @@ void btrfs_lru_cache_clear(struct btrfs_lru_cache *cache)
struct btrfs_lru_cache_entry *tmp;
list_for_each_entry_safe(entry, tmp, &cache->lru_list, lru_list)
- kfree(entry);
+ delete_entry(cache, entry);
- INIT_LIST_HEAD(&cache->lru_list);
- mtree_destroy(&cache->entries);
- cache->size = 0;
+ ASSERT(cache->size == 0);
+ ASSERT(mtree_empty(&cache->entries));
}
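For context on how the reworked API is meant to be consumed, here is a hedged sketch of a caller in the style of the later patches in the series. The containing struct, helper names, and payload are hypothetical; only btrfs_lru_cache_store()/btrfs_lru_cache_lookup() and their return conventions come from this patch.

	#include <linux/slab.h>
	#include "lru_cache.h"

	/* Hypothetical caller-side struct. The embedded cache entry must be
	 * the first member so that the cache's kfree() of the entry also
	 * frees the containing allocation. */
	struct dir_created_entry {
		struct btrfs_lru_cache_entry entry;
		/* caller-specific payload would go here */
	};

	static int cache_dir_created(struct btrfs_lru_cache *cache, u64 dir_ino)
	{
		struct dir_created_entry *dce;
		int ret;

		dce = kmalloc(sizeof(*dce), GFP_KERNEL);
		if (!dce)
			return -ENOMEM;

		/* The key keeps its full 64 bits, which is now safe on
		 * 32 bits systems thanks to the head entry lists. */
		dce->entry.key = dir_ino;
		ret = btrfs_lru_cache_store(cache, &dce->entry, GFP_KERNEL);
		if (ret < 0)
			kfree(dce);
		/* -EEXIST only means this exact key is already cached. */
		return ret == -EEXIST ? 0 : ret;
	}

	static bool dir_already_created(struct btrfs_lru_cache *cache, u64 dir_ino)
	{
		return btrfs_lru_cache_lookup(cache, dir_ino) != NULL;
	}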