author      Wang Shilong <wangsl.fnst@cn.fujitsu.com>    2013-08-09 09:25:36 +0400
committer   Chris Mason <chris.mason@fusionio.com>       2013-09-01 16:16:27 +0400
commit      b9e9a6cbc6d25b89d8007e5a680319e07921ead8 (patch)
tree        600fe0d3d6962bf13d6efbca1ddc9cbcb4f0d06b /fs/btrfs/backref.c
parent      742916b885edbc6453b4769458959929746e8e7e (diff)
download    linux-b9e9a6cbc6d25b89d8007e5a680319e07921ead8.tar.xz
Btrfs: allocate prelim_ref with a slab allocator
struct __prelim_ref is allocated and freed frequently while walking the backref tree; using a slab allocator not only speeds up allocation but also helps detect memory leaks.

Signed-off-by: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
Reviewed-by: Miao Xie <miaox@cn.fujitsu.com>
Reviewed-by: Jan Schmidt <list.btrfs@jan-o-sch.net>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
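The change follows the standard kmem_cache lifecycle: create one dedicated cache at module init, allocate and free objects from it on the hot path, and destroy it at module exit. Below is a minimal, self-contained sketch of that pattern, not code from the patch; all example_* names are invented for illustration.

/*
 * Illustrative sketch only, not from the patch: the generic kmem_cache
 * lifecycle the commit applies to struct __prelim_ref. All example_*
 * names are hypothetical.
 */
#include <linux/module.h>
#include <linux/slab.h>

struct example_ref {
	u64 root_id;
	u64 parent;
	int count;
};

static struct kmem_cache *example_ref_cache;

static int __init example_init(void)
{
	struct example_ref *ref;

	/* one dedicated cache for a small, frequently allocated struct */
	example_ref_cache = kmem_cache_create("example_ref",
					      sizeof(struct example_ref),
					      0, 0, NULL);
	if (!example_ref_cache)
		return -ENOMEM;

	/* hot-path usage: alloc/free go through the cache, not kmalloc */
	ref = kmem_cache_alloc(example_ref_cache, GFP_KERNEL);
	if (ref)
		kmem_cache_free(example_ref_cache, ref);

	return 0;
}

static void __exit example_exit(void)
{
	/* objects still allocated here trigger a destroy-time warning */
	kmem_cache_destroy(example_ref_cache);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

A dedicated cache also makes leaks visible: the objects are accounted to their own cache (shown in /proc/slabinfo unless the cache is merged with a compatible one), and kmem_cache_destroy() warns if objects are still allocated at teardown.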
Diffstat (limited to 'fs/btrfs/backref.c')
-rw-r--r--   fs/btrfs/backref.c | 33
1 file changed, 27 insertions(+), 6 deletions(-)
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index f3cb19114c95..0552a599b28f 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -119,6 +119,26 @@ struct __prelim_ref {
 	u64 wanted_disk_byte;
 };
 
+static struct kmem_cache *btrfs_prelim_ref_cache;
+
+int __init btrfs_prelim_ref_init(void)
+{
+	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
+					sizeof(struct __prelim_ref),
+					0,
+					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+					NULL);
+	if (!btrfs_prelim_ref_cache)
+		return -ENOMEM;
+	return 0;
+}
+
+void btrfs_prelim_ref_exit(void)
+{
+	if (btrfs_prelim_ref_cache)
+		kmem_cache_destroy(btrfs_prelim_ref_cache);
+}
+
 /*
  * the rules for all callers of this function are:
  * - obtaining the parent is the goal
@@ -165,7 +185,7 @@ static int __add_prelim_ref(struct list_head *head, u64 root_id,
 {
 	struct __prelim_ref *ref;
 
-	ref = kmalloc(sizeof(*ref), gfp_mask);
+	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
 	if (!ref)
 		return -ENOMEM;
 
@@ -368,7 +388,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
 
 		/* additional parents require new refs being added here */
 		while ((node = ulist_next(parents, &uiter))) {
-			new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
+			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
+						   GFP_NOFS);
 			if (!new_ref) {
 				ret = -ENOMEM;
 				goto out;
@@ -492,7 +513,7 @@ static void __merge_refs(struct list_head *head, int mode)
 			ref1->count += ref2->count;
 
 			list_del(&ref2->list);
-			kfree(ref2);
+			kmem_cache_free(btrfs_prelim_ref_cache, ref2);
 		}
 
 	}
@@ -955,7 +976,7 @@ again:
 			}
 		}
 		list_del(&ref->list);
-		kfree(ref);
+		kmem_cache_free(btrfs_prelim_ref_cache, ref);
 	}
 
 out:
@@ -963,13 +984,13 @@ out:
 	while (!list_empty(&prefs)) {
 		ref = list_first_entry(&prefs, struct __prelim_ref, list);
 		list_del(&ref->list);
-		kfree(ref);
+		kmem_cache_free(btrfs_prelim_ref_cache, ref);
 	}
 	while (!list_empty(&prefs_delayed)) {
 		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
 		list_del(&ref->list);
-		kfree(ref);
+		kmem_cache_free(btrfs_prelim_ref_cache, ref);
 	}
 
 	return ret;
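The diffstat is limited to fs/btrfs/backref.c, so the call sites of the two new helpers are not shown on this page; presumably btrfs_prelim_ref_init() and btrfs_prelim_ref_exit() are wired into the filesystem's module init and exit paths. A rough sketch of that hookup, with everything except the cache setup elided and the init_btrfs_fs()/exit_btrfs_fs() bodies assumed rather than taken from this diff:

/*
 * Assumed wiring, not shown in this diff: the cache must exist before
 * any backref walk runs and must be destroyed on module unload. The
 * prototypes match the definitions added above; the function bodies
 * below are simplified placeholders, not the real super.c code.
 */
#include <linux/module.h>

int btrfs_prelim_ref_init(void);
void btrfs_prelim_ref_exit(void);

static int __init init_btrfs_fs(void)
{
	int err;

	err = btrfs_prelim_ref_init();
	if (err)
		return err;

	/* ... the rest of the btrfs module initialization ... */
	return 0;
}

static void __exit exit_btrfs_fs(void)
{
	/* ... the rest of the btrfs module teardown ... */
	btrfs_prelim_ref_exit();
}

module_init(init_btrfs_fs);
module_exit(exit_btrfs_fs);
MODULE_LICENSE("GPL");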