author		Chao Yu <chao2.yu@samsung.com>		2015-07-28 13:33:46 +0300
committer	Jaegeuk Kim <jaegeuk@kernel.org>	2015-08-20 19:00:06 +0300
commit		31696580bf4c042a0f7b06d855e04441488d18b1 (patch)
tree		33904342d5332881fc6ffc925dbcb294c8ed87e4 /fs/f2fs
parent		206e61be29624499af46546076e835da93e6bde5 (diff)
download	linux-31696580bf4c042a0f7b06d855e04441488d18b1.tar.xz
f2fs: shrink free_nids entries
This patch introduces __count_free_nids/try_to_free_nids and registers them in the slab shrinker for shrinking under memory pressure.

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
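The count/scan pair extended below follows the kernel's generic shrinker interface: count_objects reports how many cached objects are reclaimable, and scan_objects frees up to sc->nr_to_scan of them, returning the number freed. The registration itself is outside this diff; the following is a minimal sketch of how such a pair is typically wired up, using the function names this patch extends (the init helper name is illustrative):

static struct shrinker f2fs_shrinker_info = {
	/* report how many cached objects could be freed right now */
	.count_objects = f2fs_shrink_count,
	/* free up to sc->nr_to_scan objects; return the number freed */
	.scan_objects = f2fs_shrink_scan,
	/* relative cost of recreating a reclaimed object */
	.seeks = DEFAULT_SEEKS,
};

static int __init f2fs_init_shrinker(void)
{
	/* once registered, memory reclaim may invoke the pair at any time */
	return register_shrinker(&f2fs_shrinker_info);
}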
Diffstat (limited to 'fs/f2fs')
-rw-r--r--	fs/f2fs/f2fs.h		1
-rw-r--r--	fs/f2fs/node.c		28
-rw-r--r--	fs/f2fs/segment.c	3
-rw-r--r--	fs/f2fs/shrinker.c	14
4 files changed, 46 insertions, 0 deletions
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index cc07b1595a92..23bfc0ccaf10 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1681,6 +1681,7 @@ int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
bool alloc_nid(struct f2fs_sb_info *, nid_t *);
void alloc_nid_done(struct f2fs_sb_info *, nid_t);
void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
+int try_to_free_nids(struct f2fs_sb_info *, int);
void recover_inline_xattr(struct inode *, struct page *);
void recover_xattr_data(struct inode *, struct page *, block_t);
int recover_inode_page(struct f2fs_sb_info *, struct page *);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index ac9110788b17..6e10c2a08ec6 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1635,6 +1635,34 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
kmem_cache_free(free_nid_slab, i);
}
+int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct free_nid *i, *next;
+ int nr = nr_shrink;
+
+ if (!mutex_trylock(&nm_i->build_lock))
+ return 0;
+
+ spin_lock(&nm_i->free_nid_list_lock);
+ list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
+ if (nr_shrink <= 0 || nm_i->fcnt <= NAT_ENTRY_PER_BLOCK)
+ break;
+ if (i->state == NID_ALLOC)
+ continue;
+ __del_from_free_nid_list(nm_i, i);
+ nm_i->fcnt--;
+ spin_unlock(&nm_i->free_nid_list_lock);
+ kmem_cache_free(free_nid_slab, i);
+ nr_shrink--;
+ spin_lock(&nm_i->free_nid_list_lock);
+ }
+ spin_unlock(&nm_i->free_nid_list_lock);
+ mutex_unlock(&nm_i->build_lock);
+
+ return nr - nr_shrink;
+}
+
void recover_inline_xattr(struct inode *inode, struct page *page)
{
void *src_addr, *dst_addr;
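A design note on try_to_free_nids() above: free_nid_list_lock is dropped around each kmem_cache_free() so the spinlock is never held across the slab allocator, entries in NID_ALLOC state are skipped because they are still in use, and the loop stops once the nr_shrink budget is spent or the cache falls to NAT_ENTRY_PER_BLOCK entries. The return value, nr - nr_shrink, is the number of entries actually freed, which the shrinker code uses for its accounting.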
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index bf1605dbce93..1b4265639f07 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -310,6 +310,9 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
if (!available_free_memory(sbi, NAT_ENTRIES))
try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
+ if (!available_free_memory(sbi, FREE_NIDS))
+ try_to_free_nids(sbi, NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES);
+
/* checkpoint is the only way to shrink partial cached entries */
if (!available_free_memory(sbi, NAT_ENTRIES) ||
excess_prefree_segs(sbi) ||
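For scale (constants as defined in the f2fs headers of this period; other releases may differ): NAT_ENTRY_PER_BLOCK is the number of NAT entries in one 4KB block, 4096 / sizeof(struct f2fs_nat_entry) = 4096 / 9 = 455, and FREE_NID_PAGES is 4, so each background balancing pass releases at most 455 * 4 = 1820 cached free nids. __count_free_nids() below applies the same floor, always leaving one block's worth of nids cached.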
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
index 9aa4235cd304..da0d8e0b55a5 100644
--- a/fs/f2fs/shrinker.c
+++ b/fs/f2fs/shrinker.c
@@ -23,6 +23,13 @@ static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
}
+static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
+{
+ if (NM_I(sbi)->fcnt > NAT_ENTRY_PER_BLOCK)
+ return NM_I(sbi)->fcnt - NAT_ENTRY_PER_BLOCK;
+ return 0;
+}
+
static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
{
return sbi->total_ext_tree + atomic_read(&sbi->total_ext_node);
@@ -53,6 +60,9 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink,
/* shrink clean nat cache entries */
count += __count_nat_entries(sbi);
+ /* count free nids cache entries */
+ count += __count_free_nids(sbi);
+
spin_lock(&f2fs_list_lock);
p = p->next;
mutex_unlock(&sbi->umount_mutex);
@@ -97,6 +107,10 @@ unsigned long f2fs_shrink_scan(struct shrinker *shrink,
if (freed < nr)
freed += try_to_free_nats(sbi, nr - freed);
+ /* shrink free nids cache entries */
+ if (freed < nr)
+ freed += try_to_free_nids(sbi, nr - freed);
+
spin_lock(&f2fs_list_lock);
p = p->next;
list_move_tail(&sbi->s_list, &f2fs_list);
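Taken together: f2fs_shrink_count() now adds the surplus free nids to the reclaimable total it reports to the VM, and f2fs_shrink_scan() frees them only after the extent cache and clean NAT entries have been trimmed, folding the result into the freed count returned for reclaim accounting. Because try_to_free_nids() only trylocks build_lock, a scan that races with free-nid building simply reports nothing freed from this cache instead of stalling reclaim.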