// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs shrinker support
 *   the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"

static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock);
static unsigned int shrinker_run_no;

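/* number of clean, reclaimable nat cache entries */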
static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt[RECLAIMABLE_NAT];
}

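/*
 * Free nids above MAX_FREE_NIDS are surplus and may be reclaimed;
 * anything at or below that threshold is kept for future allocation.
 */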
static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{
	long count = NM_I(sbi)->nid_cnt[FREE_NID] - MAX_FREE_NIDS;

	return count > 0 ? count : 0;
}

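/* zombie extent trees plus all cached extent nodes */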
static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
{
	return atomic_read(&sbi->total_zombie_tree) +
				atomic_read(&sbi->total_ext_node);
}

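/*
 * Report how many objects the f2fs shrinker could reclaim: walk every
 * mounted instance on f2fs_list and sum its reclaimable extent cache,
 * clean nat cache and surplus free nid entries.  umount_mutex is taken
 * with trylock so a superblock in the middle of f2fs_put_super is skipped.
 */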
unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long count = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		/* count extent cache entries */
		count += __count_extent_cache(sbi);

		/* count clean nat cache entries */
		count += __count_nat_entries(sbi);

		/* count free nids cache entries */
		count += __count_free_nids(sbi);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
	return count;
}

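/*
 * Reclaim up to sc->nr_to_scan objects.  A non-zero run number tags each
 * superblock so the same instance is not shrunk twice in one pass, and
 * visited instances are rotated to the tail of f2fs_list to spread the
 * pressure across mounts.  Half of the request goes to the extent cache
 * first; clean nat entries and then free nids make up the remainder.
 */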
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&f2fs_list_lock);
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		if (sbi->shrinker_run_no == run_no)
			break;

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		sbi->shrinker_run_no = run_no;

		/* shrink extent cache entries */
		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);

		/* shrink clean nat cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nats(sbi, nr - freed);

		/* shrink free nids cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nids(sbi, nr - freed);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		list_move_tail(&sbi->s_list, &f2fs_list);
		mutex_unlock(&sbi->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&f2fs_list_lock);
	return freed;
}

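/* Make a superblock visible to the shrinker by adding it to f2fs_list. */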
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_add_tail(&sbi->s_list, &f2fs_list);
	spin_unlock(&f2fs_list_lock);
}

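/*
 * Detach a superblock from the shrinker.  The extent cache is drained
 * first so no entries are left behind once the instance drops off
 * f2fs_list.
 */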
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));

	spin_lock(&f2fs_list_lock);
	list_del_init(&sbi->s_list);
	spin_unlock(&f2fs_list_lock);
}