Btrfs: allocate prelim_ref with a slab allocator
struct __prelim_ref is allocated and freed frequently while
walking the backref tree.  Allocating it from a dedicated slab
cache not only speeds up the allocations but also helps detect
memory leaks, since the slab code complains about objects that
are still allocated when the cache is destroyed.
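For reference, this follows the standard kmem_cache lifecycle:
create the cache once at module init, allocate and free objects
from it on the hot path, and destroy it at module exit, where the
slab code reports any objects that were never freed.  A minimal
sketch of that pattern (the names below are made up for
illustration; this is not the btrfs code itself):

	#include <linux/init.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	/* stand-in for a small, frequently allocated object */
	struct example_obj {
		u64 bytenr;
		int count;
	};

	static struct kmem_cache *example_cache;

	static int __init example_cache_init(void)
	{
		example_cache = kmem_cache_create("example_obj",
						  sizeof(struct example_obj),
						  0, SLAB_RECLAIM_ACCOUNT, NULL);
		return example_cache ? 0 : -ENOMEM;
	}

	/* hot path: objects come from the cache instead of kmalloc() */
	static struct example_obj *example_obj_alloc(gfp_t gfp_mask)
	{
		return kmem_cache_alloc(example_cache, gfp_mask);
	}

	static void example_obj_free(struct example_obj *obj)
	{
		kmem_cache_free(example_cache, obj);
	}

	static void example_cache_exit(void)
	{
		/* warns about example_obj instances that leaked */
		kmem_cache_destroy(example_cache);
	}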
Signed-off-by: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
Reviewed-by: Miao Xie <miaox@cn.fujitsu.com>
Reviewed-by: Jan Schmidt <list.btrfs@jan-o-sch.net>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index f3cb191..0552a59 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -119,6 +119,26 @@
u64 wanted_disk_byte;
};
+static struct kmem_cache *btrfs_prelim_ref_cache;
+
+int __init btrfs_prelim_ref_init(void)
+{
+	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
+					sizeof(struct __prelim_ref),
+					0,
+					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+					NULL);
+	if (!btrfs_prelim_ref_cache)
+		return -ENOMEM;
+	return 0;
+}
+
+void btrfs_prelim_ref_exit(void)
+{
+	if (btrfs_prelim_ref_cache)
+		kmem_cache_destroy(btrfs_prelim_ref_cache);
+}
+
/*
* the rules for all callers of this function are:
* - obtaining the parent is the goal
@@ -165,7 +185,7 @@
{
struct __prelim_ref *ref;
- ref = kmalloc(sizeof(*ref), gfp_mask);
+ ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
if (!ref)
return -ENOMEM;
@@ -368,7 +388,8 @@
/* additional parents require new refs being added here */
while ((node = ulist_next(parents, &uiter))) {
- new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
+ new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
+ GFP_NOFS);
if (!new_ref) {
ret = -ENOMEM;
goto out;
@@ -492,7 +513,7 @@
ref1->count += ref2->count;
list_del(&ref2->list);
- kfree(ref2);
+ kmem_cache_free(btrfs_prelim_ref_cache, ref2);
}
}
@@ -955,7 +976,7 @@
}
}
list_del(&ref->list);
- kfree(ref);
+ kmem_cache_free(btrfs_prelim_ref_cache, ref);
}
out:
@@ -963,13 +984,13 @@
while (!list_empty(&prefs)) {
ref = list_first_entry(&prefs, struct __prelim_ref, list);
list_del(&ref->list);
- kfree(ref);
+ kmem_cache_free(btrfs_prelim_ref_cache, ref);
}
while (!list_empty(&prefs_delayed)) {
ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
list);
list_del(&ref->list);
- kfree(ref);
+ kmem_cache_free(btrfs_prelim_ref_cache, ref);
}
return ret;
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index 8f2e767..a910b27 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -72,4 +72,6 @@
struct btrfs_inode_extref **ret_extref,
u64 *found_off);
+int __init btrfs_prelim_ref_init(void);
+void btrfs_prelim_ref_exit(void);
#endif
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 4d5d0f3..3aab10c 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -56,6 +56,7 @@
#include "rcu-string.h"
#include "dev-replace.h"
#include "free-space-cache.h"
+#include "backref.h"
#include "tests/btrfs-tests.h"
#define CREATE_TRACE_POINTS
@@ -1810,6 +1811,10 @@
if (err)
goto free_auto_defrag;
+ err = btrfs_prelim_ref_init();
+ if (err)
+ goto free_prelim_ref;
+
err = btrfs_interface_init();
if (err)
goto free_delayed_ref;
@@ -1830,6 +1835,8 @@
unregister_ioctl:
btrfs_interface_exit();
+free_prelim_ref:
+ btrfs_prelim_ref_exit();
free_delayed_ref:
btrfs_delayed_ref_exit();
free_auto_defrag:
@@ -1856,6 +1863,7 @@
btrfs_delayed_ref_exit();
btrfs_auto_defrag_exit();
btrfs_delayed_inode_exit();
+ btrfs_prelim_ref_exit();
ordered_data_exit();
extent_map_exit();
extent_io_exit();