/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;

/*
 * Check whether the given nid is within node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: out-of-range nid=%x, run fsck to fix.",
				__func__, nid);
		return -EINVAL;
	}
	return 0;
}

bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 50% of the memory budget to each component
	 * (FREE_NIDS, NAT_ENTRIES, DIRTY_DENTS, INO_ENTRIES, EXTENT_CACHE)
	 * respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
							PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i < MAX_INO_ENTRY; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (atomic_read(&sbi->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INMEM_PAGES) {
		/* it allows 20% / total_ram for inmemory pages */
		mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
		res = mem_size < (val.totalram / 5);
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}

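/*
 * Worked example (a sketch; ram_thresh = 10 is a hypothetical setting,
 * and avail_ram is in pages, as filled in by si_meminfo() above): with
 * 1M pages of low memory, FREE_NIDS and NAT_ENTRIES may each grow to
 * (1M * 10 / 100) >> 2 = 25K pages' worth of entries, while DIRTY_DENTS,
 * INO_ENTRIES and EXTENT_CACHE may each grow to twice that,
 * (1M * 10 / 100) >> 1 = 50K pages.
 */
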
static void clear_node_page_dirty(struct page *page)
{
	if (PageDirty(page)) {
		f2fs_clear_radix_tree_dirty_tag(page);
		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return f2fs_get_meta_page_nofail(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = f2fs_get_meta_page(sbi, src_off);
	dst_page = f2fs_grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	if (no_fail)
		new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	else
		new = kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}

/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return ne;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	__free_nat_entry(e);
}

static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
							struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	return head;
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	if (!new_ne)
		head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt in the following conditions:
	 * 1. a NEW_ADDR entry is updated to a valid block address;
	 * 2. an old block address is updated to a new one;
	 */
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->dirty_nat_cnt++;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	if (new_ne)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
}

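/*
 * Dirty NAT entries are grouped into nat_entry_set objects keyed by
 * NAT_BLOCK_OFFSET(nid), so each set corresponds to one on-disk NAT
 * block; this lets checkpoint flush dirty entries one NAT block at a
 * time instead of scattering updates across the whole NAT area.
 */
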
static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	list_move_tail(&ne->list, &nm_i->nat_entries);
	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->dirty_nat_cnt--;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	up_read(&nm_i->nat_tree_lock);
	return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}

/* takes nat_tree_lock internally; callers must not hold it */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	new = __alloc_nat_entry(nid, false);
	if (!new)
		return;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *new = __alloc_nat_entry(ni->nid, true);

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = __init_nat_entry(nm_i, new, NULL, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry can
		 * remain in the nat cache, so reinitialize it with the new
		 * node information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}
	/* let's free early to reduce memory consumption */
	if (e != new)
		__free_nat_entry(new);

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, is_valid_data_blkaddr(sbi, nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!is_valid_data_blkaddr(sbi, new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}

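/*
 * The clean-NAT LRU: __init_nat_entry() adds entries at the tail of
 * nm_i->nat_entries and the shrink path below evicts from the head, so
 * the oldest cached entries go first. The trylock keeps memory reclaim
 * (e.g. the f2fs shrinker) from stalling behind a writer holding
 * nat_tree_lock; the return value is the number of entries actually freed.
 */
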
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}

/*
 * Fill @ni for @nid by checking the in-memory nat cache first, then the
 * NAT journal in the current summary, and finally the on-disk NAT block.
 * Returns 0 on success, or a negative errno if the NAT page cannot be read.
 */
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
						struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	int i;

	ni->nid = nid;

	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		up_read(&nm_i->nat_tree_lock);
		return 0;
	}

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));

	/* Check current segment summary */
	down_read(&curseg->journal_rwsem);
	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from nat page */
	index = current_nat_addr(sbi, nid);
	up_read(&nm_i->nat_tree_lock);

	page = f2fs_get_meta_page(sbi, index);
	if (IS_ERR(page))
		return PTR_ERR(page);

	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(sbi, nid, &ne);
	return 0;
}

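/*
 * Usage sketch (hypothetical caller): resolving a nid to its current
 * block address.
 *
 *	struct node_info ni;
 *	int err = f2fs_get_node_info(sbi, dn->nid, &ni);
 *
 *	if (err)
 *		return err;
 *	blkaddr = ni.blk_addr;	(NULL_ADDR means the node is unallocated)
 */
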
/*
 * readahead MAX_RA_NODE number of node pages.
 */
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		f2fs_ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK;
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		/* fall through */
	case 2:
		base += 2 * direct_blks;
		/* fall through */
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}

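/*
 * Worked example (a sketch, assuming the default 4KB geometry where
 * ADDRS_PER_BLOCK = 1018 and ADDRS_PER_INODE = 923): if the lookup
 * stopped at a missing direct node (cur_level == max_level == 1),
 * skipped_unit stays 1018 and base = 923, so for pgofs = 2000 the next
 * offset to probe is ((2000 - 923) / 1018 + 1) * 1018 + 923 = 2959,
 * the first block covered by the following direct node.
 */
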
/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}

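/*
 * Worked example (a sketch, assuming ADDRS_PER_INODE = 923 and
 * ADDRS_PER_BLOCK = NIDS_PER_BLOCK = 1018): for block 1000 the inode's
 * 923 direct pointers are exhausted, so the path descends into the first
 * direct node: level = 1, offset[] = { NODE_DIR1_BLOCK, 1000 - 923 = 77 },
 * noffset[] = { 0, 1 } (the direct node is the first node after the inode).
 */
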
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set to ALLOC_NODE.
 * In the lookup-only modes, we don't need to care about the lock.
 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = f2fs_get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->inode,
				dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}

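/*
 * Usage sketch (hypothetical caller): resolve the block address backing
 * file offset @index without allocating any missing node blocks.
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	blkaddr = dn.data_blkaddr;
 *	f2fs_put_dnode(&dn);
 */
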
static int truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	int err;

	err = f2fs_get_node_info(sbi, dn->nid, &ni);
	if (err)
		return err;

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

	return 0;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;
	int err;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	f2fs_truncate_data_blocks(dn);
	err = truncate_node(dn);
	if (err)
		return err;

	return 1;
}

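/*
 * Return convention for the recursive helpers below: a positive return
 * counts the node blocks accounted for (freed or already absent), which
 * the caller uses to advance its node offset; a subtree that is fully
 * processed counts its indirect node plus NIDS_PER_BLOCK children, hence
 * the NIDS_PER_BLOCK + 1 returns. A negative return is an errno.
 */
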
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* the reference count will be increased */
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		err = truncate_node(dn);
		if (err)
			goto fail;
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
	if (level < 0)
		return level;

	page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

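/*
 * Example (a sketch of the flow above): truncating so that the first
 * dropped block lands in the middle of the NODE_IND1 region gives
 * level = 2; truncate_partial_nodes() first trims the partially covered
 * direct node under that indirect node, then the while loop frees every
 * later node tree (NODE_IND2, NODE_DIND) wholesale, zeroing the matching
 * i_nid[] slots in the on-disk inode as it goes.
 */
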
/* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;
	int err;

	if (!nid)
		return 0;

	npage = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	set_new_dnode(&dn, inode, NULL, npage, nid);
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_page(npage, 1);
		return err;
	}

	f2fs_i_xnid_write(inode, 0);

	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	f2fs_bug_on(F2FS_I_SB(inode),
			inode->i_blocks != 0 && inode->i_blocks != 8);

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}
	return 0;
}

struct page *f2fs_new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return f2fs_new_node_page(&dn, 0);
}

struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

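/*
 * Usage sketch (hypothetical caller, mirroring f2fs_get_dnode_of_data()
 * above): a free nid must be reserved before the call and confirmed or
 * released afterwards.
 *
 *	if (!f2fs_alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	dn->nid = nid;
 *	page = f2fs_new_node_page(dn, noffset);
 *	if (IS_ERR(page)) {
 *		f2fs_alloc_nid_failed(sbi, nid);
 *		return PTR_ERR(page);
 *	}
 *	f2fs_alloc_nid_done(sbi, nid);
 */
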
/*
 * Caller should act on the return value as follows:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, int op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};
	int err;

	if (PageUptodate(page)) {
#ifdef CONFIG_F2FS_CHECK_FS
		f2fs_bug_on(sbi, !f2fs_inode_chksum_verify(sbi, page));
#endif
		return LOCKED_PAGE;
	}

	err = f2fs_get_node_info(sbi, page->index, &ni);
	if (err)
		return err;

	if (unlikely(ni.blk_addr == NULL_ADDR) ||
			is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
	return f2fs_submit_page_bio(&fio);
}

/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	if (f2fs_check_nid_range(sbi, nid))
		return;

	rcu_read_lock();
	apage = radix_tree_lookup(&NODE_MAPPING(sbi)->i_pages, nid);
	rcu_read_unlock();
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}

Jaegeuk Kim17a0ee52016-03-08 09:04:35 -08001224static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
Chao Yu0e022ea2016-01-05 16:52:46 +08001225 struct page *parent, int start)
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001226{
Jaegeuk Kim56ae6742013-03-31 12:47:20 +09001227 struct page *page;
1228 int err;
Jaegeuk Kim4aa69d52015-12-23 14:17:47 -08001229
1230 if (!nid)
1231 return ERR_PTR(-ENOENT);
Chao Yu4d57b862018-05-30 00:20:41 +08001232 if (f2fs_check_nid_range(sbi, nid))
Jaegeuk Kima4f843b2018-04-23 23:02:31 -06001233 return ERR_PTR(-EINVAL);
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09001234repeat:
Jaegeuk Kim300e1292016-04-29 16:11:53 -07001235 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001236 if (!page)
1237 return ERR_PTR(-ENOMEM);
1238
Christoph Hellwig70fd7612016-11-01 07:40:10 -06001239 err = read_node_page(page, 0);
Jaegeuk Kim86531d62015-07-15 13:08:21 -07001240 if (err < 0) {
1241 f2fs_put_page(page, 1);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001242 return ERR_PTR(err);
Chao Yue1c51b9f2015-12-11 16:08:22 +08001243 } else if (err == LOCKED_PAGE) {
Chao Yu1f258ec2017-06-07 11:17:35 +08001244 err = 0;
Chao Yue1c51b9f2015-12-11 16:08:22 +08001245 goto page_hit;
Jaegeuk Kim86531d62015-07-15 13:08:21 -07001246 }
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001247
Chao Yu0e022ea2016-01-05 16:52:46 +08001248 if (parent)
Chao Yu4d57b862018-05-30 00:20:41 +08001249 f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);
Chao Yu0e022ea2016-01-05 16:52:46 +08001250
Chao Yue1c51b9f2015-12-11 16:08:22 +08001251 lock_page(page);
1252
Jaegeuk Kim4ef51a82014-01-21 18:51:16 +09001253 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09001254 f2fs_put_page(page, 1);
1255 goto repeat;
1256 }
Chao Yu1563ac72016-07-03 22:05:12 +08001257
Chao Yu1f258ec2017-06-07 11:17:35 +08001258 if (unlikely(!PageUptodate(page))) {
1259 err = -EIO;
Chao Yu1563ac72016-07-03 22:05:12 +08001260 goto out_err;
Chao Yu1f258ec2017-06-07 11:17:35 +08001261 }
Chao Yu704956e2017-07-31 20:19:09 +08001262
1263 if (!f2fs_inode_chksum_verify(sbi, page)) {
1264 err = -EBADMSG;
1265 goto out_err;
1266 }
Chao Yue1c51b9f2015-12-11 16:08:22 +08001267page_hit:
Yunlong Song0c9df7f2016-05-26 19:40:29 +08001268	if (unlikely(nid != nid_of_node(page))) {
Chao Yu1f258ec2017-06-07 11:17:35 +08001269 f2fs_msg(sbi->sb, KERN_WARNING, "inconsistent node block, "
1270 "nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
1271 nid, nid_of_node(page), ino_of_node(page),
1272 ofs_of_node(page), cpver_of_node(page),
1273 next_blkaddr_of_node(page));
Chao Yu1f258ec2017-06-07 11:17:35 +08001274 err = -EINVAL;
Yunlong Song0c9df7f2016-05-26 19:40:29 +08001275out_err:
Jaegeuk Kimee605232017-08-31 16:54:51 -07001276 ClearPageUptodate(page);
Yunlong Song0c9df7f2016-05-26 19:40:29 +08001277 f2fs_put_page(page, 1);
Chao Yu1f258ec2017-06-07 11:17:35 +08001278 return ERR_PTR(err);
Yunlong Song0c9df7f2016-05-26 19:40:29 +08001279 }
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001280 return page;
1281}
1282
Chao Yu4d57b862018-05-30 00:20:41 +08001283struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
Chao Yu0e022ea2016-01-05 16:52:46 +08001284{
1285 return __get_node_page(sbi, nid, NULL, 0);
1286}
1287
Chao Yu4d57b862018-05-30 00:20:41 +08001288struct page *f2fs_get_node_page_ra(struct page *parent, int start)
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001289{
Jaegeuk Kim40813632014-09-02 15:31:18 -07001290 struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
Chao Yu0e022ea2016-01-05 16:52:46 +08001291 nid_t nid = get_nid(parent, start, false);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001292
Chao Yu0e022ea2016-01-05 16:52:46 +08001293 return __get_node_page(sbi, nid, parent, start);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001294}
1295
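/*
 * Write back the inline data of @ino before its inode gets evicted:
 * page index 0 of the inode's mapping is flushed through
 * f2fs_write_inline_data(). The page is looked up with FGP_NOWAIT, so a
 * page that cannot be locked immediately is simply skipped.
 */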
Jaegeuk Kim2049d4f2016-01-25 05:57:05 -08001296static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
1297{
1298 struct inode *inode;
1299 struct page *page;
Chao Yu0f3311a2016-05-21 00:11:09 +08001300 int ret;
Jaegeuk Kim2049d4f2016-01-25 05:57:05 -08001301
1302 /* should flush inline_data before evict_inode */
1303 inode = ilookup(sbi->sb, ino);
1304 if (!inode)
1305 return;
1306
Chao Yu01eccef2017-10-28 16:52:30 +08001307 page = f2fs_pagecache_get_page(inode->i_mapping, 0,
1308 FGP_LOCK|FGP_NOWAIT, 0);
Jaegeuk Kim2049d4f2016-01-25 05:57:05 -08001309 if (!page)
1310 goto iput_out;
1311
1312 if (!PageUptodate(page))
1313 goto page_out;
1314
1315 if (!PageDirty(page))
1316 goto page_out;
1317
1318 if (!clear_page_dirty_for_io(page))
1319 goto page_out;
1320
Chao Yu0f3311a2016-05-21 00:11:09 +08001321 ret = f2fs_write_inline_data(inode, page);
1322 inode_dec_dirty_pages(inode);
Chao Yu4d57b862018-05-30 00:20:41 +08001323 f2fs_remove_dirty_inode(inode);
Chao Yu0f3311a2016-05-21 00:11:09 +08001324 if (ret)
Jaegeuk Kim2049d4f2016-01-25 05:57:05 -08001325 set_page_dirty(page);
1326page_out:
Jaegeuk Kim4a6de502016-03-30 11:25:31 -07001327 f2fs_put_page(page, 1);
Jaegeuk Kim2049d4f2016-01-25 05:57:05 -08001328iput_out:
1329 iput(inode);
1330}
1331
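/*
 * Scan the dirty node pages of @sbi and return the last dirty direct
 * node page that belongs to @ino, holding an extra page reference.
 * Returns NULL when no such page exists, or ERR_PTR(-EIO) on a
 * checkpoint error.
 */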
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001332static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001333{
Jan Kara028a63a2017-11-15 17:34:51 -08001334 pgoff_t index;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001335 struct pagevec pvec;
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001336 struct page *last_page = NULL;
Jan Kara028a63a2017-11-15 17:34:51 -08001337 int nr_pages;
Jaegeuk Kim52681372016-04-13 16:24:44 -07001338
Mel Gorman86679822017-11-15 17:37:52 -08001339 pagevec_init(&pvec);
Jaegeuk Kim52681372016-04-13 16:24:44 -07001340 index = 0;
Jaegeuk Kim52681372016-04-13 16:24:44 -07001341
Jan Kara028a63a2017-11-15 17:34:51 -08001342 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
Jan Kara67fd7072017-11-15 17:35:19 -08001343 PAGECACHE_TAG_DIRTY))) {
Jan Kara028a63a2017-11-15 17:34:51 -08001344 int i;
Jaegeuk Kim52681372016-04-13 16:24:44 -07001345
1346 for (i = 0; i < nr_pages; i++) {
1347 struct page *page = pvec.pages[i];
1348
1349 if (unlikely(f2fs_cp_error(sbi))) {
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001350 f2fs_put_page(last_page, 0);
Jaegeuk Kim52681372016-04-13 16:24:44 -07001351 pagevec_release(&pvec);
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001352 return ERR_PTR(-EIO);
Jaegeuk Kim52681372016-04-13 16:24:44 -07001353 }
1354
1355 if (!IS_DNODE(page) || !is_cold_node(page))
1356 continue;
1357 if (ino_of_node(page) != ino)
1358 continue;
1359
1360 lock_page(page);
1361
1362 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1363continue_unlock:
1364 unlock_page(page);
1365 continue;
1366 }
1367 if (ino_of_node(page) != ino)
1368 goto continue_unlock;
1369
1370 if (!PageDirty(page)) {
1371 /* someone wrote it for us */
1372 goto continue_unlock;
1373 }
1374
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001375 if (last_page)
1376 f2fs_put_page(last_page, 0);
1377
1378 get_page(page);
1379 last_page = page;
1380 unlock_page(page);
1381 }
1382 pagevec_release(&pvec);
1383 cond_resched();
1384 }
1385 return last_page;
1386}
1387
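/*
 * Write one dirty node page out of place. The current address comes
 * from the NAT: NULL_ADDR means the node was truncated, so the page is
 * just cleaned and unlocked. Otherwise the block is written through
 * f2fs_do_write_node_page() and the NAT entry is redirected to
 * fio.new_blkaddr. Returns 0 on success, or AOP_WRITEPAGE_ACTIVATE
 * after redirtying the page.
 */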
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08001388static int __write_node_page(struct page *page, bool atomic, bool *submitted,
Chao Yub0af6d42017-08-02 23:21:48 +08001389 struct writeback_control *wbc, bool do_balance,
1390 enum iostat_type io_type)
Jaegeuk Kimfaa24892017-02-02 18:27:17 -08001391{
1392 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1393 nid_t nid;
1394 struct node_info ni;
1395 struct f2fs_io_info fio = {
1396 .sbi = sbi,
Chao Yu39d787b2017-09-29 13:59:38 +08001397 .ino = ino_of_node(page),
Jaegeuk Kimfaa24892017-02-02 18:27:17 -08001398 .type = NODE,
1399 .op = REQ_OP_WRITE,
1400 .op_flags = wbc_to_write_flags(wbc),
1401 .page = page,
1402 .encrypted_page = NULL,
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08001403 .submitted = false,
Chao Yub0af6d42017-08-02 23:21:48 +08001404 .io_type = io_type,
Yufen Yu578c6472018-01-09 19:33:39 +08001405 .io_wbc = wbc,
Jaegeuk Kimfaa24892017-02-02 18:27:17 -08001406 };
1407
1408 trace_f2fs_writepage(page, NODE);
1409
Jaegeuk Kim868de612018-05-04 18:04:22 -07001410 if (unlikely(f2fs_cp_error(sbi)))
1411 goto redirty_out;
Chao Yudb198ae2018-01-18 17:29:10 +08001412
Jaegeuk Kimfaa24892017-02-02 18:27:17 -08001413 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1414 goto redirty_out;
Jaegeuk Kimfaa24892017-02-02 18:27:17 -08001415
1416 /* get old block addr of this node page */
1417 nid = nid_of_node(page);
1418 f2fs_bug_on(sbi, page->index != nid);
1419
Chao Yu77357302018-07-17 00:02:17 +08001420 if (f2fs_get_node_info(sbi, nid, &ni))
1421 goto redirty_out;
1422
Jaegeuk Kimfaa24892017-02-02 18:27:17 -08001423 if (wbc->for_reclaim) {
1424 if (!down_read_trylock(&sbi->node_write))
1425 goto redirty_out;
1426 } else {
1427 down_read(&sbi->node_write);
1428 }
1429
Jaegeuk Kimfaa24892017-02-02 18:27:17 -08001430 /* This page is already truncated */
1431 if (unlikely(ni.blk_addr == NULL_ADDR)) {
1432 ClearPageUptodate(page);
1433 dec_page_count(sbi, F2FS_DIRTY_NODES);
1434 up_read(&sbi->node_write);
1435 unlock_page(page);
1436 return 0;
1437 }
1438
Chao Yuc9b60782018-08-01 19:13:44 +08001439 if (__is_valid_data_blkaddr(ni.blk_addr) &&
1440 !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC))
1441 goto redirty_out;
1442
Jaegeuk Kime7c75ab2017-02-02 18:18:06 -08001443 if (atomic && !test_opt(sbi, NOBARRIER))
1444 fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
1445
Jaegeuk Kimfaa24892017-02-02 18:27:17 -08001446 set_page_writeback(page);
Jaegeuk Kim17c50032018-04-11 23:09:04 -07001447 ClearPageError(page);
Jaegeuk Kimfaa24892017-02-02 18:27:17 -08001448 fio.old_blkaddr = ni.blk_addr;
Chao Yu4d57b862018-05-30 00:20:41 +08001449 f2fs_do_write_node_page(nid, &fio);
Jaegeuk Kimfaa24892017-02-02 18:27:17 -08001450 set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
1451 dec_page_count(sbi, F2FS_DIRTY_NODES);
1452 up_read(&sbi->node_write);
1453
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08001454 if (wbc->for_reclaim) {
Jaegeuk Kimb9109b02017-05-10 11:28:38 -07001455 f2fs_submit_merged_write_cond(sbi, page->mapping->host, 0,
1456 page->index, NODE);
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08001457 submitted = NULL;
1458 }
Jaegeuk Kimfaa24892017-02-02 18:27:17 -08001459
1460 unlock_page(page);
1461
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08001462 if (unlikely(f2fs_cp_error(sbi))) {
Jaegeuk Kimb9109b02017-05-10 11:28:38 -07001463 f2fs_submit_merged_write(sbi, NODE);
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08001464 submitted = NULL;
1465 }
1466 if (submitted)
1467 *submitted = fio.submitted;
Jaegeuk Kimfaa24892017-02-02 18:27:17 -08001468
Yunlong Song401db792017-07-27 20:11:00 +08001469 if (do_balance)
1470 f2fs_balance_fs(sbi, false);
Jaegeuk Kimfaa24892017-02-02 18:27:17 -08001471 return 0;
1472
1473redirty_out:
1474 redirty_page_for_writepage(wbc, page);
1475 return AOP_WRITEPAGE_ACTIVATE;
1476}
1477
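/*
 * Used by GC when migrating a node page: foreground GC writes the page
 * out synchronously right here, while background GC only redirties it
 * and leaves the actual write to later writeback.
 */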
Chao Yu4d57b862018-05-30 00:20:41 +08001478void f2fs_move_node_page(struct page *node_page, int gc_type)
Yunlei Hef15194f2017-10-30 14:18:55 +08001479{
1480 if (gc_type == FG_GC) {
1481 struct writeback_control wbc = {
1482 .sync_mode = WB_SYNC_ALL,
1483 .nr_to_write = 1,
1484 .for_reclaim = 0,
1485 };
1486
1487 set_page_dirty(node_page);
1488 f2fs_wait_on_page_writeback(node_page, NODE, true);
1489
1490 f2fs_bug_on(F2FS_P_SB(node_page), PageWriteback(node_page));
1491 if (!clear_page_dirty_for_io(node_page))
1492 goto out_page;
1493
1494 if (__write_node_page(node_page, false, NULL,
1495 &wbc, false, FS_GC_NODE_IO))
1496 unlock_page(node_page);
1497 goto release_page;
1498 } else {
 1499		/* set page dirty; let later writeback flush it out */
1500 if (!PageWriteback(node_page))
1501 set_page_dirty(node_page);
1502 }
1503out_page:
1504 unlock_page(node_page);
1505release_page:
1506 f2fs_put_page(node_page, 0);
1507}
1508
Jaegeuk Kimfaa24892017-02-02 18:27:17 -08001509static int f2fs_write_node_page(struct page *page,
1510 struct writeback_control *wbc)
1511{
Chao Yub0af6d42017-08-02 23:21:48 +08001512 return __write_node_page(page, false, NULL, wbc, false, FS_NODE_IO);
Jaegeuk Kimfaa24892017-02-02 18:27:17 -08001513}
1514
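/*
 * fsync path: write out the dirty direct node pages of @inode. In
 * atomic mode only the last dnode found by last_fsync_dnode() carries
 * the fsync mark, so recovery can tell where the fsynced chain ends;
 * if that marked page could not be written, it is redirtied and the
 * whole scan is retried.
 */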
Chao Yu4d57b862018-05-30 00:20:41 +08001515int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001516 struct writeback_control *wbc, bool atomic)
1517{
Jan Kara028a63a2017-11-15 17:34:51 -08001518 pgoff_t index;
Jaegeuk Kim942fd312017-02-01 16:51:22 -08001519 pgoff_t last_idx = ULONG_MAX;
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001520 struct pagevec pvec;
1521 int ret = 0;
1522 struct page *last_page = NULL;
1523 bool marked = false;
Jaegeuk Kim26de9b12016-05-20 20:42:37 -07001524 nid_t ino = inode->i_ino;
Jan Kara028a63a2017-11-15 17:34:51 -08001525 int nr_pages;
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001526
1527 if (atomic) {
1528 last_page = last_fsync_dnode(sbi, ino);
1529 if (IS_ERR_OR_NULL(last_page))
1530 return PTR_ERR_OR_ZERO(last_page);
1531 }
1532retry:
Mel Gorman86679822017-11-15 17:37:52 -08001533 pagevec_init(&pvec);
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001534 index = 0;
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001535
Jan Kara028a63a2017-11-15 17:34:51 -08001536 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
Jan Kara67fd7072017-11-15 17:35:19 -08001537 PAGECACHE_TAG_DIRTY))) {
Jan Kara028a63a2017-11-15 17:34:51 -08001538 int i;
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001539
1540 for (i = 0; i < nr_pages; i++) {
1541 struct page *page = pvec.pages[i];
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08001542 bool submitted = false;
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001543
1544 if (unlikely(f2fs_cp_error(sbi))) {
1545 f2fs_put_page(last_page, 0);
1546 pagevec_release(&pvec);
Chao Yu9de69272016-10-11 22:57:06 +08001547 ret = -EIO;
1548 goto out;
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001549 }
1550
1551 if (!IS_DNODE(page) || !is_cold_node(page))
1552 continue;
1553 if (ino_of_node(page) != ino)
1554 continue;
1555
1556 lock_page(page);
1557
1558 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1559continue_unlock:
1560 unlock_page(page);
1561 continue;
1562 }
1563 if (ino_of_node(page) != ino)
Jaegeuk Kim52681372016-04-13 16:24:44 -07001564 goto continue_unlock;
1565
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001566 if (!PageDirty(page) && page != last_page) {
1567 /* someone wrote it for us */
1568 goto continue_unlock;
1569 }
1570
1571 f2fs_wait_on_page_writeback(page, NODE, true);
1572 BUG_ON(PageWriteback(page));
1573
Jaegeuk Kimd29fd172017-04-12 12:02:00 -07001574 set_fsync_mark(page, 0);
1575 set_dentry_mark(page, 0);
1576
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001577 if (!atomic || page == last_page) {
1578 set_fsync_mark(page, 1);
Jaegeuk Kim26de9b12016-05-20 20:42:37 -07001579 if (IS_INODE(page)) {
1580 if (is_inode_flag_set(inode,
1581 FI_DIRTY_INODE))
Chao Yu4d57b862018-05-30 00:20:41 +08001582 f2fs_update_inode(inode, page);
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001583 set_dentry_mark(page,
Chao Yu4d57b862018-05-30 00:20:41 +08001584 f2fs_need_dentry_mark(sbi, ino));
Jaegeuk Kim26de9b12016-05-20 20:42:37 -07001585 }
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001586			/* may be written by another thread */
1587 if (!PageDirty(page))
1588 set_page_dirty(page);
1589 }
1590
1591 if (!clear_page_dirty_for_io(page))
1592 goto continue_unlock;
Jaegeuk Kim52681372016-04-13 16:24:44 -07001593
Jaegeuk Kime7c75ab2017-02-02 18:18:06 -08001594 ret = __write_node_page(page, atomic &&
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08001595 page == last_page,
Chao Yub0af6d42017-08-02 23:21:48 +08001596 &submitted, wbc, true,
1597 FS_NODE_IO);
Jaegeuk Kimc267ec12016-04-15 09:25:04 -07001598 if (ret) {
Jaegeuk Kim52681372016-04-13 16:24:44 -07001599 unlock_page(page);
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001600 f2fs_put_page(last_page, 0);
1601 break;
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08001602 } else if (submitted) {
Jaegeuk Kim942fd312017-02-01 16:51:22 -08001603 last_idx = page->index;
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001604 }
Chao Yu3f5f4952016-09-29 18:50:10 +08001605
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001606 if (page == last_page) {
1607 f2fs_put_page(page, 0);
1608 marked = true;
Jaegeuk Kim52681372016-04-13 16:24:44 -07001609 break;
Jaegeuk Kimc267ec12016-04-15 09:25:04 -07001610 }
Jaegeuk Kim52681372016-04-13 16:24:44 -07001611 }
1612 pagevec_release(&pvec);
1613 cond_resched();
1614
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001615 if (ret || marked)
Jaegeuk Kim52681372016-04-13 16:24:44 -07001616 break;
1617 }
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001618 if (!ret && atomic && !marked) {
1619 f2fs_msg(sbi->sb, KERN_DEBUG,
1620 "Retry to write fsync mark: ino=%u, idx=%lx",
1621 ino, last_page->index);
1622 lock_page(last_page);
Yunlei Hed40a43a2016-11-16 17:26:24 +08001623 f2fs_wait_on_page_writeback(last_page, NODE, true);
Jaegeuk Kim608514d2016-04-15 09:43:17 -07001624 set_page_dirty(last_page);
1625 unlock_page(last_page);
1626 goto retry;
1627 }
Chao Yu9de69272016-10-11 22:57:06 +08001628out:
Jaegeuk Kim942fd312017-02-01 16:51:22 -08001629 if (last_idx != ULONG_MAX)
Jaegeuk Kimb9109b02017-05-10 11:28:38 -07001630 f2fs_submit_merged_write_cond(sbi, NULL, ino, last_idx, NODE);
Jaegeuk Kimc267ec12016-04-15 09:25:04 -07001631	return ret ? -EIO : 0;
Jaegeuk Kim52681372016-04-13 16:24:44 -07001632}
1633
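/*
 * Flush dirty node pages in three passes: indirect nodes first, then
 * dentry dnodes, then file dnodes, as the flushing-sequence comment
 * below describes. WB_SYNC_NONE callers back off early whenever a
 * WB_SYNC_ALL writer is pending on the node mapping.
 */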
Chao Yu4d57b862018-05-30 00:20:41 +08001634int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
1635 struct writeback_control *wbc,
Chao Yub0af6d42017-08-02 23:21:48 +08001636 bool do_balance, enum iostat_type io_type)
Jaegeuk Kim52681372016-04-13 16:24:44 -07001637{
Jan Kara028a63a2017-11-15 17:34:51 -08001638 pgoff_t index;
Jaegeuk Kim52681372016-04-13 16:24:44 -07001639 struct pagevec pvec;
1640 int step = 0;
Jaegeuk Kim12bb0a82016-03-11 15:33:22 -08001641 int nwritten = 0;
Chao Yu3f5f4952016-09-29 18:50:10 +08001642 int ret = 0;
Chao Yuc29fd0c2018-06-04 23:20:36 +08001643 int nr_pages, done = 0;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001644
Mel Gorman86679822017-11-15 17:37:52 -08001645 pagevec_init(&pvec);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001646
1647next_step:
1648 index = 0;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001649
Chao Yuc29fd0c2018-06-04 23:20:36 +08001650 while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
1651 NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
Jan Kara028a63a2017-11-15 17:34:51 -08001652 int i;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001653
1654 for (i = 0; i < nr_pages; i++) {
1655 struct page *page = pvec.pages[i];
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08001656 bool submitted = false;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001657
Chao Yuc29fd0c2018-06-04 23:20:36 +08001658			/* give priority to WB_SYNC threads */
1659 if (atomic_read(&sbi->wb_sync_req[NODE]) &&
1660 wbc->sync_mode == WB_SYNC_NONE) {
1661 done = 1;
1662 break;
1663 }
1664
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001665 /*
1666 * flushing sequence with step:
1667 * 0. indirect nodes
1668 * 1. dentry dnodes
1669 * 2. file dnodes
1670 */
1671 if (step == 0 && IS_DNODE(page))
1672 continue;
1673 if (step == 1 && (!IS_DNODE(page) ||
1674 is_cold_node(page)))
1675 continue;
1676 if (step == 2 && (!IS_DNODE(page) ||
1677 !is_cold_node(page)))
1678 continue;
Chao Yu9a4cbc92016-02-22 18:35:46 +08001679lock_node:
Chao Yu4b270a82018-07-04 18:04:10 +08001680 if (wbc->sync_mode == WB_SYNC_ALL)
1681 lock_page(page);
1682 else if (!trylock_page(page))
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001683 continue;
1684
Jaegeuk Kim4ef51a82014-01-21 18:51:16 +09001685 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001686continue_unlock:
1687 unlock_page(page);
1688 continue;
1689 }
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001690
1691 if (!PageDirty(page)) {
1692 /* someone wrote it for us */
1693 goto continue_unlock;
1694 }
1695
Jaegeuk Kim2049d4f2016-01-25 05:57:05 -08001696 /* flush inline_data */
Jaegeuk Kim52681372016-04-13 16:24:44 -07001697 if (is_inline_node(page)) {
Jaegeuk Kim2049d4f2016-01-25 05:57:05 -08001698 clear_inline_node(page);
1699 unlock_page(page);
1700 flush_inline_data(sbi, ino_of_node(page));
Chao Yu9a4cbc92016-02-22 18:35:46 +08001701 goto lock_node;
Jaegeuk Kim2049d4f2016-01-25 05:57:05 -08001702 }
1703
Jaegeuk Kimfa3d2bd2016-01-28 11:48:52 -08001704 f2fs_wait_on_page_writeback(page, NODE, true);
1705
1706 BUG_ON(PageWriteback(page));
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001707 if (!clear_page_dirty_for_io(page))
1708 goto continue_unlock;
1709
Jaegeuk Kim52681372016-04-13 16:24:44 -07001710 set_fsync_mark(page, 0);
1711 set_dentry_mark(page, 0);
Jaegeuk Kim52746512014-08-11 18:18:36 -07001712
Yunlong Song401db792017-07-27 20:11:00 +08001713 ret = __write_node_page(page, false, &submitted,
Chao Yub0af6d42017-08-02 23:21:48 +08001714 wbc, do_balance, io_type);
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08001715 if (ret)
Jaegeuk Kim52746512014-08-11 18:18:36 -07001716 unlock_page(page);
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08001717 else if (submitted)
Chao Yu3f5f4952016-09-29 18:50:10 +08001718 nwritten++;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001719
1720 if (--wbc->nr_to_write == 0)
1721 break;
1722 }
1723 pagevec_release(&pvec);
1724 cond_resched();
1725
1726 if (wbc->nr_to_write == 0) {
1727 step = 2;
1728 break;
1729 }
1730 }
1731
1732 if (step < 2) {
1733 step++;
1734 goto next_step;
1735 }
Chao Yudb198ae2018-01-18 17:29:10 +08001736
Chao Yu3f5f4952016-09-29 18:50:10 +08001737 if (nwritten)
Jaegeuk Kimb9109b02017-05-10 11:28:38 -07001738 f2fs_submit_merged_write(sbi, NODE);
Chao Yudb198ae2018-01-18 17:29:10 +08001739
1740 if (unlikely(f2fs_cp_error(sbi)))
1741 return -EIO;
Chao Yu3f5f4952016-09-29 18:50:10 +08001742 return ret;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001743}
1744
Chao Yu4d57b862018-05-30 00:20:41 +08001745int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
Jaegeuk Kimcfe58f92013-10-31 14:57:01 +09001746{
Jan Kara028a63a2017-11-15 17:34:51 -08001747 pgoff_t index = 0;
Jaegeuk Kimcfe58f92013-10-31 14:57:01 +09001748 struct pagevec pvec;
Miklos Szeredi280db3c2016-09-16 12:44:21 +02001749 int ret2, ret = 0;
Jan Kara028a63a2017-11-15 17:34:51 -08001750 int nr_pages;
Jaegeuk Kimcfe58f92013-10-31 14:57:01 +09001751
Mel Gorman86679822017-11-15 17:37:52 -08001752 pagevec_init(&pvec);
Jaegeuk Kim4ef51a82014-01-21 18:51:16 +09001753
Jan Kara028a63a2017-11-15 17:34:51 -08001754 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
Jan Kara67fd7072017-11-15 17:35:19 -08001755 PAGECACHE_TAG_WRITEBACK))) {
Jan Kara028a63a2017-11-15 17:34:51 -08001756 int i;
Jaegeuk Kimcfe58f92013-10-31 14:57:01 +09001757
1758 for (i = 0; i < nr_pages; i++) {
1759 struct page *page = pvec.pages[i];
1760
Chao Yu4bf08ff2013-11-04 10:28:33 +08001761 if (ino && ino_of_node(page) == ino) {
Jaegeuk Kimfec1d652016-01-20 23:43:51 +08001762 f2fs_wait_on_page_writeback(page, NODE, true);
Chao Yu4bf08ff2013-11-04 10:28:33 +08001763 if (TestClearPageError(page))
1764 ret = -EIO;
1765 }
Jaegeuk Kimcfe58f92013-10-31 14:57:01 +09001766 }
1767 pagevec_release(&pvec);
1768 cond_resched();
1769 }
1770
Miklos Szeredi280db3c2016-09-16 12:44:21 +02001771 ret2 = filemap_check_errors(NODE_MAPPING(sbi));
Jaegeuk Kimcfe58f92013-10-31 14:57:01 +09001772 if (!ret)
1773 ret = ret2;
1774 return ret;
1775}
1776
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001777static int f2fs_write_node_pages(struct address_space *mapping,
1778 struct writeback_control *wbc)
1779{
Jaegeuk Kim40813632014-09-02 15:31:18 -07001780 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
Jaegeuk Kim9dfa1ba2016-07-13 19:33:19 -07001781 struct blk_plug plug;
Jaegeuk Kim50c8cdb2014-03-18 13:47:11 +09001782 long diff;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001783
Chao Yu0771fcc2017-06-29 23:20:45 +08001784 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1785 goto skip_write;
1786
Jaegeuk Kim4660f9c2013-10-24 14:19:18 +09001787	/* balancing f2fs's metadata in the background */
1788 f2fs_balance_fs_bg(sbi);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001789
Jaegeuk Kima7fdffb2013-01-18 14:54:13 +09001790	/* collect a number of dirty node pages and write them together */
Jaegeuk Kim87d6f892014-03-18 12:40:49 +09001791 if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
Jaegeuk Kimd3baf952014-03-18 13:43:05 +09001792 goto skip_write;
Jaegeuk Kima7fdffb2013-01-18 14:54:13 +09001793
Chao Yuc29fd0c2018-06-04 23:20:36 +08001794 if (wbc->sync_mode == WB_SYNC_ALL)
1795 atomic_inc(&sbi->wb_sync_req[NODE]);
1796 else if (atomic_read(&sbi->wb_sync_req[NODE]))
1797 goto skip_write;
1798
Yunlei Hed31c7c32016-02-04 16:14:00 +08001799 trace_f2fs_writepages(mapping->host, wbc, NODE);
1800
Jaegeuk Kim50c8cdb2014-03-18 13:47:11 +09001801 diff = nr_pages_to_write(sbi, NODE, wbc);
Jaegeuk Kim9dfa1ba2016-07-13 19:33:19 -07001802 blk_start_plug(&plug);
Chao Yu4d57b862018-05-30 00:20:41 +08001803 f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
Jaegeuk Kim9dfa1ba2016-07-13 19:33:19 -07001804 blk_finish_plug(&plug);
Jaegeuk Kim50c8cdb2014-03-18 13:47:11 +09001805 wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
Chao Yuc29fd0c2018-06-04 23:20:36 +08001806
1807 if (wbc->sync_mode == WB_SYNC_ALL)
1808 atomic_dec(&sbi->wb_sync_req[NODE]);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001809 return 0;
Jaegeuk Kimd3baf952014-03-18 13:43:05 +09001810
1811skip_write:
1812 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
Yunlei Hed31c7c32016-02-04 16:14:00 +08001813 trace_f2fs_writepages(mapping->host, wbc, NODE);
Jaegeuk Kimd3baf952014-03-18 13:43:05 +09001814 return 0;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001815}
1816
1817static int f2fs_set_node_page_dirty(struct page *page)
1818{
Jaegeuk Kim26c6b882013-10-24 17:53:29 +09001819 trace_f2fs_set_page_dirty(page, NODE);
1820
Jaegeuk Kim237c0792016-06-30 18:49:15 -07001821 if (!PageUptodate(page))
1822 SetPageUptodate(page);
Weichao Guo54c55c42018-03-09 23:10:21 +08001823#ifdef CONFIG_F2FS_CHECK_FS
1824 if (IS_INODE(page))
1825 f2fs_inode_chksum_set(F2FS_P_SB(page), page);
1826#endif
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001827 if (!PageDirty(page)) {
Jaegeuk Kimb87078a2018-04-20 19:29:52 -07001828 __set_page_dirty_nobuffers(page);
Jaegeuk Kim40813632014-09-02 15:31:18 -07001829 inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001830 SetPagePrivate(page);
Jaegeuk Kim9e4ded32014-12-17 19:58:58 -08001831 f2fs_trace_pid(page);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001832 return 1;
1833 }
1834 return 0;
1835}
1836
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001837/*
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001838 * Structure of the f2fs node operations
1839 */
1840const struct address_space_operations f2fs_node_aops = {
1841 .writepage = f2fs_write_node_page,
1842 .writepages = f2fs_write_node_pages,
1843 .set_page_dirty = f2fs_set_node_page_dirty,
Chao Yu487261f2015-02-05 17:44:29 +08001844 .invalidatepage = f2fs_invalidate_page,
1845 .releasepage = f2fs_release_page,
Weichao Guo5b7a4872016-09-20 05:03:27 +08001846#ifdef CONFIG_MIGRATION
1847 .migratepage = f2fs_migrate_page,
1848#endif
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001849};
1850
Jaegeuk Kim8a7ed662014-02-21 14:29:35 +09001851static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
1852 nid_t n)
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001853{
Jaegeuk Kim8a7ed662014-02-21 14:29:35 +09001854 return radix_tree_lookup(&nm_i->free_nid_root, n);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001855}
1856
Chao Yu9a4ffdf2017-09-29 13:59:35 +08001857static int __insert_free_nid(struct f2fs_sb_info *sbi,
Fan Lia0761f62017-10-28 19:03:37 +08001858 struct free_nid *i, enum nid_state state)
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001859{
Chao Yub8559dc2016-10-12 19:28:29 +08001860 struct f2fs_nm_info *nm_i = NM_I(sbi);
1861
Fan Lia0761f62017-10-28 19:03:37 +08001862 int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
1863 if (err)
1864 return err;
Jaegeuk Kimeb0aa4b2016-10-12 10:09:59 -07001865
Chao Yu9a4ffdf2017-09-29 13:59:35 +08001866 f2fs_bug_on(sbi, state != i->state);
1867 nm_i->nid_cnt[state]++;
1868 if (state == FREE_NID)
1869 list_add_tail(&i->list, &nm_i->free_nid_list);
Jaegeuk Kimeb0aa4b2016-10-12 10:09:59 -07001870 return 0;
Chao Yub8559dc2016-10-12 19:28:29 +08001871}
1872
Chao Yu9a4ffdf2017-09-29 13:59:35 +08001873static void __remove_free_nid(struct f2fs_sb_info *sbi,
Fan Lia0761f62017-10-28 19:03:37 +08001874 struct free_nid *i, enum nid_state state)
Chao Yub8559dc2016-10-12 19:28:29 +08001875{
1876 struct f2fs_nm_info *nm_i = NM_I(sbi);
1877
Chao Yu9a4ffdf2017-09-29 13:59:35 +08001878 f2fs_bug_on(sbi, state != i->state);
1879 nm_i->nid_cnt[state]--;
1880 if (state == FREE_NID)
1881 list_del(&i->list);
Fan Lia0761f62017-10-28 19:03:37 +08001882 radix_tree_delete(&nm_i->free_nid_root, i->nid);
1883}
1884
1885static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
1886 enum nid_state org_state, enum nid_state dst_state)
1887{
1888 struct f2fs_nm_info *nm_i = NM_I(sbi);
1889
1890 f2fs_bug_on(sbi, org_state != i->state);
1891 i->state = dst_state;
1892 nm_i->nid_cnt[org_state]--;
1893 nm_i->nid_cnt[dst_state]++;
1894
1895 switch (dst_state) {
1896 case PREALLOC_NID:
1897 list_del(&i->list);
1898 break;
1899 case FREE_NID:
1900 list_add_tail(&i->list, &nm_i->free_nid_list);
1901 break;
1902 default:
1903 BUG_ON(1);
1904 }
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001905}
1906
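/*
 * Maintain the in-memory free nid bitmap of one NAT block: @set marks
 * @nid free, otherwise @nid is marked as in use. free_nid_count is only
 * decremented on a clear that does not come from a build pass.
 */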
LiFan5921aaa2017-11-22 16:07:23 +08001907static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
1908 bool set, bool build)
1909{
1910 struct f2fs_nm_info *nm_i = NM_I(sbi);
1911 unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
1912 unsigned int nid_ofs = nid - START_NID(nid);
1913
1914 if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
1915 return;
1916
1917 if (set) {
1918 if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
1919 return;
1920 __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
1921 nm_i->free_nid_count[nat_ofs]++;
1922 } else {
1923 if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
1924 return;
1925 __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
1926 if (!build)
1927 nm_i->free_nid_count[nat_ofs]--;
1928 }
1929}
1930
Chao Yu4ac91242017-02-23 10:53:49 +08001931/* return if the nid is recognized as free */
LiFan5921aaa2017-11-22 16:07:23 +08001932static bool add_free_nid(struct f2fs_sb_info *sbi,
1933 nid_t nid, bool build, bool update)
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001934{
Jaegeuk Kim6fb03f32014-04-16 10:47:06 +09001935 struct f2fs_nm_info *nm_i = NM_I(sbi);
Chao Yu30a61dd2017-03-22 14:45:05 +08001936 struct free_nid *i, *e;
Jaegeuk Kim59bbd472013-05-07 20:47:40 +09001937 struct nat_entry *ne;
Chao Yu30a61dd2017-03-22 14:45:05 +08001938 int err = -EINVAL;
1939 bool ret = false;
Jaegeuk Kim9198ace2013-04-25 13:21:12 +09001940
 1941	/* nid 0 should never be used */
Chao Yucfb271d2013-12-05 17:15:22 +08001942 if (unlikely(nid == 0))
Chao Yu4ac91242017-02-23 10:53:49 +08001943 return false;
Jaegeuk Kim59bbd472013-05-07 20:47:40 +09001944
Gu Zheng7bd59382013-10-22 14:52:26 +08001945 i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001946 i->nid = nid;
Chao Yu9a4ffdf2017-09-29 13:59:35 +08001947 i->state = FREE_NID;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001948
LiFan5921aaa2017-11-22 16:07:23 +08001949 radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
Jaegeuk Kim769ec6e2014-12-03 20:47:26 -08001950
Chao Yub8559dc2016-10-12 19:28:29 +08001951 spin_lock(&nm_i->nid_list_lock);
Chao Yu30a61dd2017-03-22 14:45:05 +08001952
1953 if (build) {
1954 /*
1955 * Thread A Thread B
1956 * - f2fs_create
1957 * - f2fs_new_inode
Chao Yu4d57b862018-05-30 00:20:41 +08001958 * - f2fs_alloc_nid
Chao Yu9a4ffdf2017-09-29 13:59:35 +08001959 * - __insert_nid_to_list(PREALLOC_NID)
Chao Yu30a61dd2017-03-22 14:45:05 +08001960 * - f2fs_balance_fs_bg
Chao Yu4d57b862018-05-30 00:20:41 +08001961 * - f2fs_build_free_nids
1962 * - __f2fs_build_free_nids
Chao Yu30a61dd2017-03-22 14:45:05 +08001963 * - scan_nat_page
1964 * - add_free_nid
1965 * - __lookup_nat_cache
1966 * - f2fs_add_link
Chao Yu4d57b862018-05-30 00:20:41 +08001967 * - f2fs_init_inode_metadata
1968 * - f2fs_new_inode_page
1969 * - f2fs_new_node_page
Chao Yu30a61dd2017-03-22 14:45:05 +08001970 * - set_node_addr
Chao Yu4d57b862018-05-30 00:20:41 +08001971 * - f2fs_alloc_nid_done
Chao Yu9a4ffdf2017-09-29 13:59:35 +08001972 * - __remove_nid_from_list(PREALLOC_NID)
1973 * - __insert_nid_to_list(FREE_NID)
Chao Yu30a61dd2017-03-22 14:45:05 +08001974 */
1975 ne = __lookup_nat_cache(nm_i, nid);
1976 if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
1977 nat_get_blkaddr(ne) != NULL_ADDR))
1978 goto err_out;
1979
1980 e = __lookup_free_nid_list(nm_i, nid);
1981 if (e) {
Chao Yu9a4ffdf2017-09-29 13:59:35 +08001982 if (e->state == FREE_NID)
Chao Yu30a61dd2017-03-22 14:45:05 +08001983 ret = true;
1984 goto err_out;
1985 }
1986 }
1987 ret = true;
Fan Lia0761f62017-10-28 19:03:37 +08001988 err = __insert_free_nid(sbi, i, FREE_NID);
Chao Yu30a61dd2017-03-22 14:45:05 +08001989err_out:
LiFan5921aaa2017-11-22 16:07:23 +08001990 if (update) {
1991 update_free_nid_bitmap(sbi, nid, ret, build);
1992 if (!build)
1993 nm_i->available_nids++;
1994 }
Jaegeuk Kimeb0aa4b2016-10-12 10:09:59 -07001995 spin_unlock(&nm_i->nid_list_lock);
1996 radix_tree_preload_end();
LiFan5921aaa2017-11-22 16:07:23 +08001997
Chao Yu30a61dd2017-03-22 14:45:05 +08001998 if (err)
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001999 kmem_cache_free(free_nid_slab, i);
Chao Yu30a61dd2017-03-22 14:45:05 +08002000 return ret;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002001}
2002
Chao Yub8559dc2016-10-12 19:28:29 +08002003static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002004{
Chao Yub8559dc2016-10-12 19:28:29 +08002005 struct f2fs_nm_info *nm_i = NM_I(sbi);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002006 struct free_nid *i;
Chao Yucf0ee0f2014-04-02 08:55:00 +08002007 bool need_free = false;
2008
Chao Yub8559dc2016-10-12 19:28:29 +08002009 spin_lock(&nm_i->nid_list_lock);
Jaegeuk Kim8a7ed662014-02-21 14:29:35 +09002010 i = __lookup_free_nid_list(nm_i, nid);
Chao Yu9a4ffdf2017-09-29 13:59:35 +08002011 if (i && i->state == FREE_NID) {
Fan Lia0761f62017-10-28 19:03:37 +08002012 __remove_free_nid(sbi, i, FREE_NID);
Chao Yucf0ee0f2014-04-02 08:55:00 +08002013 need_free = true;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002014 }
Chao Yub8559dc2016-10-12 19:28:29 +08002015 spin_unlock(&nm_i->nid_list_lock);
Chao Yucf0ee0f2014-04-02 08:55:00 +08002016
2017 if (need_free)
2018 kmem_cache_free(free_nid_slab, i);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002019}
2020
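/*
 * Scan one NAT block: every nid mapped to NULL_ADDR is added as a free
 * nid, anything else only updates the free nid bitmap. A NEW_ADDR entry
 * in the on-disk NAT indicates corruption and is reported as -EINVAL.
 */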
Chao Yue2374012018-06-15 14:45:57 +08002021static int scan_nat_page(struct f2fs_sb_info *sbi,
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002022 struct page *nat_page, nid_t start_nid)
2023{
Jaegeuk Kim6fb03f32014-04-16 10:47:06 +09002024 struct f2fs_nm_info *nm_i = NM_I(sbi);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002025 struct f2fs_nat_block *nat_blk = page_address(nat_page);
2026 block_t blk_addr;
Chao Yu4ac91242017-02-23 10:53:49 +08002027 unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002028 int i;
2029
Jaegeuk Kim23380b82017-03-07 14:11:06 -08002030 __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
Chao Yu4ac91242017-02-23 10:53:49 +08002031
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002032 i = start_nid % NAT_ENTRY_PER_BLOCK;
2033
2034 for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
Chao Yucfb271d2013-12-05 17:15:22 +08002035 if (unlikely(start_nid >= nm_i->max_nid))
Jaegeuk Kim04431c42013-03-16 08:34:37 +09002036 break;
Haicheng Li23d38842013-05-06 23:15:43 +08002037
2038 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
Chao Yue2374012018-06-15 14:45:57 +08002039
2040 if (blk_addr == NEW_ADDR)
2041 return -EINVAL;
2042
LiFan5921aaa2017-11-22 16:07:23 +08002043 if (blk_addr == NULL_ADDR) {
2044 add_free_nid(sbi, start_nid, true, true);
2045 } else {
2046 spin_lock(&NM_I(sbi)->nid_list_lock);
2047 update_free_nid_bitmap(sbi, start_nid, false, true);
2048 spin_unlock(&NM_I(sbi)->nid_list_lock);
2049 }
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002050 }
Chao Yue2374012018-06-15 14:45:57 +08002051
2052 return 0;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002053}
2054
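/*
 * Pick up free nids from NAT entries that still sit in the curseg
 * journal: a journalled NULL_ADDR frees the nid, any other address
 * invalidates a stale free nid entry.
 */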
Chao Yu2fbaa252017-11-08 17:47:36 +08002055static void scan_curseg_cache(struct f2fs_sb_info *sbi)
Chao Yu4ac91242017-02-23 10:53:49 +08002056{
Chao Yu4ac91242017-02-23 10:53:49 +08002057 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2058 struct f2fs_journal *journal = curseg->journal;
Chao Yu2fbaa252017-11-08 17:47:36 +08002059 int i;
Chao Yu4ac91242017-02-23 10:53:49 +08002060
Chao Yu4ac91242017-02-23 10:53:49 +08002061 down_read(&curseg->journal_rwsem);
2062 for (i = 0; i < nats_in_cursum(journal); i++) {
2063 block_t addr;
2064 nid_t nid;
2065
2066 addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
2067 nid = le32_to_cpu(nid_in_journal(journal, i));
2068 if (addr == NULL_ADDR)
LiFan5921aaa2017-11-22 16:07:23 +08002069 add_free_nid(sbi, nid, true, false);
Chao Yu4ac91242017-02-23 10:53:49 +08002070 else
2071 remove_free_nid(sbi, nid);
2072 }
2073 up_read(&curseg->journal_rwsem);
Chao Yu2fbaa252017-11-08 17:47:36 +08002074}
2075
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002076static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
Chao Yu4ac91242017-02-23 10:53:49 +08002077{
2078 struct f2fs_nm_info *nm_i = NM_I(sbi);
Chao Yu4ac91242017-02-23 10:53:49 +08002079 unsigned int i, idx;
Fan Li97456572017-11-07 19:14:24 +08002080 nid_t nid;
Chao Yu4ac91242017-02-23 10:53:49 +08002081
2082 down_read(&nm_i->nat_tree_lock);
2083
2084 for (i = 0; i < nm_i->nat_blocks; i++) {
2085 if (!test_bit_le(i, nm_i->nat_block_bitmap))
2086 continue;
2087 if (!nm_i->free_nid_count[i])
2088 continue;
2089 for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
Fan Li97456572017-11-07 19:14:24 +08002090 idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
2091 NAT_ENTRY_PER_BLOCK, idx);
2092 if (idx >= NAT_ENTRY_PER_BLOCK)
2093 break;
Chao Yu4ac91242017-02-23 10:53:49 +08002094
2095 nid = i * NAT_ENTRY_PER_BLOCK + idx;
LiFan5921aaa2017-11-22 16:07:23 +08002096 add_free_nid(sbi, nid, true, false);
Chao Yu4ac91242017-02-23 10:53:49 +08002097
Chao Yu9a4ffdf2017-09-29 13:59:35 +08002098 if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
Chao Yu4ac91242017-02-23 10:53:49 +08002099 goto out;
2100 }
2101 }
2102out:
Chao Yu2fbaa252017-11-08 17:47:36 +08002103 scan_curseg_cache(sbi);
Chao Yu4ac91242017-02-23 10:53:49 +08002104
Chao Yu4ac91242017-02-23 10:53:49 +08002105 up_read(&nm_i->nat_tree_lock);
2106}
2107
Chao Yue2374012018-06-15 14:45:57 +08002108static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
Chao Yu4d57b862018-05-30 00:20:41 +08002109 bool sync, bool mount)
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002110{
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002111 struct f2fs_nm_info *nm_i = NM_I(sbi);
Chao Yue2374012018-06-15 14:45:57 +08002112 int i = 0, ret;
Jaegeuk Kim55008d82013-04-25 16:05:51 +09002113 nid_t nid = nm_i->next_scan_nid;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002114
Yunlei Hee9cdd302017-04-26 15:56:52 +08002115 if (unlikely(nid >= nm_i->max_nid))
2116 nid = 0;
2117
Jaegeuk Kim55008d82013-04-25 16:05:51 +09002118 /* Enough entries */
Chao Yu9a4ffdf2017-09-29 13:59:35 +08002119 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
Chao Yue2374012018-06-15 14:45:57 +08002120 return 0;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002121
Chao Yu4d57b862018-05-30 00:20:41 +08002122 if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
Chao Yue2374012018-06-15 14:45:57 +08002123 return 0;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002124
Chao Yu4ac91242017-02-23 10:53:49 +08002125 if (!mount) {
2126 /* try to find free nids in free_nid_bitmap */
2127 scan_free_nid_bits(sbi);
2128
Fan Li74986212017-11-07 11:04:33 +08002129 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
Chao Yue2374012018-06-15 14:45:57 +08002130 return 0;
Jaegeuk Kim22ad0b62017-02-09 10:38:09 -08002131 }
2132
Jaegeuk Kim55008d82013-04-25 16:05:51 +09002133 /* readahead nat pages to be scanned */
Chao Yu4d57b862018-05-30 00:20:41 +08002134 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
Chao Yu26879fb2015-10-12 17:05:59 +08002135 META_NAT, true);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002136
Jaegeuk Kimb873b792016-08-04 11:38:25 -07002137 down_read(&nm_i->nat_tree_lock);
Jaegeuk Kima5131192016-01-02 09:19:41 -08002138
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002139 while (1) {
Yunlei He66e83362017-11-17 16:13:38 +08002140 if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
2141 nm_i->nat_block_bitmap)) {
2142 struct page *page = get_current_nat_page(sbi, nid);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002143
Chao Yue2374012018-06-15 14:45:57 +08002144 ret = scan_nat_page(sbi, page, nid);
Yunlei He66e83362017-11-17 16:13:38 +08002145 f2fs_put_page(page, 1);
Chao Yue2374012018-06-15 14:45:57 +08002146
2147 if (ret) {
2148 up_read(&nm_i->nat_tree_lock);
2149 f2fs_bug_on(sbi, !mount);
2150 f2fs_msg(sbi->sb, KERN_ERR,
2151 "NAT is corrupt, run fsck to fix it");
2152 return -EINVAL;
2153 }
Yunlei He66e83362017-11-17 16:13:38 +08002154 }
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002155
2156 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
Chao Yucfb271d2013-12-05 17:15:22 +08002157 if (unlikely(nid >= nm_i->max_nid))
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002158 nid = 0;
Jaegeuk Kim55008d82013-04-25 16:05:51 +09002159
Chao Yua6d494b2015-07-24 18:26:26 +08002160 if (++i >= FREE_NID_PAGES)
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002161 break;
2162 }
2163
Jaegeuk Kim55008d82013-04-25 16:05:51 +09002164	/* continue from the next nat page to find more free nids */
2165 nm_i->next_scan_nid = nid;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002166
2167 /* find free nids from current sum_pages */
Chao Yu2fbaa252017-11-08 17:47:36 +08002168 scan_curseg_cache(sbi);
Chao Yudfc08a12016-02-14 18:50:40 +08002169
Jaegeuk Kimb873b792016-08-04 11:38:25 -07002170 up_read(&nm_i->nat_tree_lock);
Chao Yu2db23882015-10-12 17:07:33 +08002171
Chao Yu4d57b862018-05-30 00:20:41 +08002172 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
Chao Yuea1a29a02015-10-12 17:08:48 +08002173 nm_i->ra_nid_pages, META_NAT, false);
Chao Yue2374012018-06-15 14:45:57 +08002174
2175 return 0;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002176}
2177
Chao Yue2374012018-06-15 14:45:57 +08002178int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
Chao Yu2411cf52016-10-11 22:31:34 +08002179{
Chao Yue2374012018-06-15 14:45:57 +08002180 int ret;
2181
Chao Yu2411cf52016-10-11 22:31:34 +08002182 mutex_lock(&NM_I(sbi)->build_lock);
Chao Yue2374012018-06-15 14:45:57 +08002183 ret = __f2fs_build_free_nids(sbi, sync, mount);
Chao Yu2411cf52016-10-11 22:31:34 +08002184 mutex_unlock(&NM_I(sbi)->build_lock);
Chao Yue2374012018-06-15 14:45:57 +08002185
2186 return ret;
Chao Yu2411cf52016-10-11 22:31:34 +08002187}
2188
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002189/*
 2190 * If this function returns success, the caller can obtain a new nid
 2191 * from the second parameter of this function.
 2192 * The returned nid can be used as an ino as well as a nid when an inode is created.
2193 */
Chao Yu4d57b862018-05-30 00:20:41 +08002194bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002195{
2196 struct f2fs_nm_info *nm_i = NM_I(sbi);
2197 struct free_nid *i = NULL;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002198retry:
Jaegeuk Kimcb789422016-04-29 16:29:22 -07002199#ifdef CONFIG_F2FS_FAULT_INJECTION
Chao Yu55523512017-02-25 11:08:28 +08002200 if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
2201 f2fs_show_injection_info(FAULT_ALLOC_NID);
Jaegeuk Kimcb789422016-04-29 16:29:22 -07002202 return false;
Chao Yu55523512017-02-25 11:08:28 +08002203 }
Jaegeuk Kimcb789422016-04-29 16:29:22 -07002204#endif
Chao Yub8559dc2016-10-12 19:28:29 +08002205 spin_lock(&nm_i->nid_list_lock);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002206
Chao Yu04d47e62016-11-17 20:53:11 +08002207 if (unlikely(nm_i->available_nids == 0)) {
2208 spin_unlock(&nm_i->nid_list_lock);
2209 return false;
2210 }
Jaegeuk Kim55008d82013-04-25 16:05:51 +09002211
Chao Yu4d57b862018-05-30 00:20:41 +08002212 /* We should not use stale free nids created by f2fs_build_free_nids */
2213 if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
Chao Yu9a4ffdf2017-09-29 13:59:35 +08002214 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
2215 i = list_first_entry(&nm_i->free_nid_list,
Chao Yub8559dc2016-10-12 19:28:29 +08002216 struct free_nid, list);
Jaegeuk Kim55008d82013-04-25 16:05:51 +09002217 *nid = i->nid;
Chao Yub8559dc2016-10-12 19:28:29 +08002218
Fan Lia0761f62017-10-28 19:03:37 +08002219 __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
Chao Yu04d47e62016-11-17 20:53:11 +08002220 nm_i->available_nids--;
Chao Yu4ac91242017-02-23 10:53:49 +08002221
Chao Yu346fe752017-03-13 20:10:41 +08002222 update_free_nid_bitmap(sbi, *nid, false, false);
Chao Yu4ac91242017-02-23 10:53:49 +08002223
Chao Yub8559dc2016-10-12 19:28:29 +08002224 spin_unlock(&nm_i->nid_list_lock);
Jaegeuk Kim55008d82013-04-25 16:05:51 +09002225 return true;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002226 }
Chao Yub8559dc2016-10-12 19:28:29 +08002227 spin_unlock(&nm_i->nid_list_lock);
Jaegeuk Kim55008d82013-04-25 16:05:51 +09002228
2229 /* Let's scan nat pages and its caches to get free nids */
Chao Yu4d57b862018-05-30 00:20:41 +08002230 f2fs_build_free_nids(sbi, true, false);
Jaegeuk Kim55008d82013-04-25 16:05:51 +09002231 goto retry;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002232}
2233
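/*
 * A minimal sketch of the expected caller pattern, modelled on
 * f2fs_recover_xattr_data() below; consume_nid() is a hypothetical
 * placeholder for whatever actually puts the nid to use:
 *
 *	nid_t nid;
 *	int err;
 *
 *	if (!f2fs_alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	err = consume_nid(sbi, nid);		(hypothetical helper)
 *	if (err)
 *		f2fs_alloc_nid_failed(sbi, nid);	(give the nid back)
 *	else
 *		f2fs_alloc_nid_done(sbi, nid);		(nid is now in use)
 */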
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09002234/*
Chao Yu4d57b862018-05-30 00:20:41 +08002235 * f2fs_alloc_nid() should be called prior to this function.
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002236 */
Chao Yu4d57b862018-05-30 00:20:41 +08002237void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002238{
2239 struct f2fs_nm_info *nm_i = NM_I(sbi);
2240 struct free_nid *i;
2241
Chao Yub8559dc2016-10-12 19:28:29 +08002242 spin_lock(&nm_i->nid_list_lock);
Jaegeuk Kim8a7ed662014-02-21 14:29:35 +09002243 i = __lookup_free_nid_list(nm_i, nid);
Chao Yub8559dc2016-10-12 19:28:29 +08002244 f2fs_bug_on(sbi, !i);
Fan Lia0761f62017-10-28 19:03:37 +08002245 __remove_free_nid(sbi, i, PREALLOC_NID);
Chao Yub8559dc2016-10-12 19:28:29 +08002246 spin_unlock(&nm_i->nid_list_lock);
Chao Yucf0ee0f2014-04-02 08:55:00 +08002247
2248 kmem_cache_free(free_nid_slab, i);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002249}
2250
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09002251/*
Chao Yu4d57b862018-05-30 00:20:41 +08002252 * f2fs_alloc_nid() should be called prior to this function.
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002253 */
Chao Yu4d57b862018-05-30 00:20:41 +08002254void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002255{
Jaegeuk Kim49952fa2013-04-03 22:19:03 +09002256 struct f2fs_nm_info *nm_i = NM_I(sbi);
2257 struct free_nid *i;
Chao Yucf0ee0f2014-04-02 08:55:00 +08002258 bool need_free = false;
Jaegeuk Kim49952fa2013-04-03 22:19:03 +09002259
Jaegeuk Kim65985d92013-08-14 21:57:27 +09002260 if (!nid)
2261 return;
2262
Chao Yub8559dc2016-10-12 19:28:29 +08002263 spin_lock(&nm_i->nid_list_lock);
Jaegeuk Kim8a7ed662014-02-21 14:29:35 +09002264 i = __lookup_free_nid_list(nm_i, nid);
Chao Yub8559dc2016-10-12 19:28:29 +08002265 f2fs_bug_on(sbi, !i);
2266
Chao Yu4d57b862018-05-30 00:20:41 +08002267 if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
Fan Lia0761f62017-10-28 19:03:37 +08002268 __remove_free_nid(sbi, i, PREALLOC_NID);
Chao Yucf0ee0f2014-04-02 08:55:00 +08002269 need_free = true;
Haicheng Li95630cb2013-05-06 23:15:41 +08002270 } else {
Fan Lia0761f62017-10-28 19:03:37 +08002271 __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
Haicheng Li95630cb2013-05-06 23:15:41 +08002272 }
Chao Yu04d47e62016-11-17 20:53:11 +08002273
2274 nm_i->available_nids++;
2275
Chao Yu346fe752017-03-13 20:10:41 +08002276 update_free_nid_bitmap(sbi, nid, true, false);
Chao Yu4ac91242017-02-23 10:53:49 +08002277
Chao Yub8559dc2016-10-12 19:28:29 +08002278 spin_unlock(&nm_i->nid_list_lock);
Chao Yucf0ee0f2014-04-02 08:55:00 +08002279
2280 if (need_free)
2281 kmem_cache_free(free_nid_slab, i);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002282}
2283
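/*
 * Trim the free nid cache when memory is tight: drop up to @nr_shrink
 * cached FREE_NID entries, but never shrink below MAX_FREE_NIDS.
 * Returns the number of entries actually freed, or 0 when the build
 * lock is contended.
 */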
Chao Yu4d57b862018-05-30 00:20:41 +08002284int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
Chao Yu31696582015-07-28 18:33:46 +08002285{
2286 struct f2fs_nm_info *nm_i = NM_I(sbi);
2287 struct free_nid *i, *next;
2288 int nr = nr_shrink;
2289
Chao Yu9a4ffdf2017-09-29 13:59:35 +08002290 if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
Jaegeuk Kimad4edb82016-06-16 16:41:49 -07002291 return 0;
2292
Chao Yu31696582015-07-28 18:33:46 +08002293 if (!mutex_trylock(&nm_i->build_lock))
2294 return 0;
2295
Chao Yub8559dc2016-10-12 19:28:29 +08002296 spin_lock(&nm_i->nid_list_lock);
Chao Yu9a4ffdf2017-09-29 13:59:35 +08002297 list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
Chao Yub8559dc2016-10-12 19:28:29 +08002298 if (nr_shrink <= 0 ||
Chao Yu9a4ffdf2017-09-29 13:59:35 +08002299 nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
Chao Yu31696582015-07-28 18:33:46 +08002300 break;
Chao Yub8559dc2016-10-12 19:28:29 +08002301
Fan Lia0761f62017-10-28 19:03:37 +08002302 __remove_free_nid(sbi, i, FREE_NID);
Chao Yu31696582015-07-28 18:33:46 +08002303 kmem_cache_free(free_nid_slab, i);
2304 nr_shrink--;
Chao Yu31696582015-07-28 18:33:46 +08002305 }
Chao Yub8559dc2016-10-12 19:28:29 +08002306 spin_unlock(&nm_i->nid_list_lock);
Chao Yu31696582015-07-28 18:33:46 +08002307 mutex_unlock(&nm_i->build_lock);
2308
2309 return nr - nr_shrink;
2310}
2311
Chao Yu4d57b862018-05-30 00:20:41 +08002312void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
Chao Yu28cdce02014-03-11 13:37:38 +08002313{
Chao Yu28cdce02014-03-11 13:37:38 +08002314 void *src_addr, *dst_addr;
2315 size_t inline_size;
2316 struct page *ipage;
2317 struct f2fs_inode *ri;
2318
Chao Yu4d57b862018-05-30 00:20:41 +08002319 ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
Jaegeuk Kim9850cf42014-09-02 15:52:58 -07002320 f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
Chao Yu28cdce02014-03-11 13:37:38 +08002321
Jaegeuk Kime3b4d432014-08-07 23:45:42 -07002322 ri = F2FS_INODE(page);
Yunlei He1eca05a2018-01-03 18:03:04 +08002323 if (ri->i_inline & F2FS_INLINE_XATTR) {
2324 set_inode_flag(inode, FI_INLINE_XATTR);
2325 } else {
Jaegeuk Kim91942322016-05-20 10:13:22 -07002326 clear_inode_flag(inode, FI_INLINE_XATTR);
Jaegeuk Kime3b4d432014-08-07 23:45:42 -07002327 goto update_inode;
2328 }
2329
Chao Yu6afc6622017-09-06 21:59:50 +08002330 dst_addr = inline_xattr_addr(inode, ipage);
2331 src_addr = inline_xattr_addr(inode, page);
Chao Yu28cdce02014-03-11 13:37:38 +08002332 inline_size = inline_xattr_size(inode);
2333
Jaegeuk Kimfec1d652016-01-20 23:43:51 +08002334 f2fs_wait_on_page_writeback(ipage, NODE, true);
Chao Yu28cdce02014-03-11 13:37:38 +08002335 memcpy(dst_addr, src_addr, inline_size);
Jaegeuk Kime3b4d432014-08-07 23:45:42 -07002336update_inode:
Chao Yu4d57b862018-05-30 00:20:41 +08002337 f2fs_update_inode(inode, ipage);
Chao Yu28cdce02014-03-11 13:37:38 +08002338 f2fs_put_page(ipage, 1);
2339}
2340
Chao Yu4d57b862018-05-30 00:20:41 +08002341int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
Jaegeuk Kimabb23662014-01-28 12:25:06 +09002342{
Jaegeuk Kim40813632014-09-02 15:31:18 -07002343 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kimabb23662014-01-28 12:25:06 +09002344 nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
Yunlei He87905682017-07-18 09:48:12 +08002345 nid_t new_xnid;
2346 struct dnode_of_data dn;
Jaegeuk Kimabb23662014-01-28 12:25:06 +09002347 struct node_info ni;
Chao Yud2600812017-02-08 17:39:45 +08002348 struct page *xpage;
Chao Yu77357302018-07-17 00:02:17 +08002349 int err;
Jaegeuk Kimabb23662014-01-28 12:25:06 +09002350
Jaegeuk Kimabb23662014-01-28 12:25:06 +09002351 if (!prev_xnid)
2352 goto recover_xnid;
2353
Chao Yud2600812017-02-08 17:39:45 +08002354 /* 1: invalidate the previous xattr nid */
Chao Yu77357302018-07-17 00:02:17 +08002355 err = f2fs_get_node_info(sbi, prev_xnid, &ni);
2356 if (err)
2357 return err;
2358
Chao Yu4d57b862018-05-30 00:20:41 +08002359 f2fs_invalidate_blocks(sbi, ni.blk_addr);
Chao Yu000519f2017-07-06 01:11:31 +08002360 dec_valid_node_count(sbi, inode, false);
Jaegeuk Kim479f40c2014-03-20 21:52:53 +09002361 set_node_addr(sbi, &ni, NULL_ADDR, false);
Jaegeuk Kimabb23662014-01-28 12:25:06 +09002362
2363recover_xnid:
Chao Yud2600812017-02-08 17:39:45 +08002364 /* 2: update xattr nid in inode */
Chao Yu4d57b862018-05-30 00:20:41 +08002365 if (!f2fs_alloc_nid(sbi, &new_xnid))
Yunlei He87905682017-07-18 09:48:12 +08002366 return -ENOSPC;
2367
2368 set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
Chao Yu4d57b862018-05-30 00:20:41 +08002369 xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
Yunlei He87905682017-07-18 09:48:12 +08002370 if (IS_ERR(xpage)) {
Chao Yu4d57b862018-05-30 00:20:41 +08002371 f2fs_alloc_nid_failed(sbi, new_xnid);
Yunlei He87905682017-07-18 09:48:12 +08002372 return PTR_ERR(xpage);
2373 }
2374
Chao Yu4d57b862018-05-30 00:20:41 +08002375 f2fs_alloc_nid_done(sbi, new_xnid);
2376 f2fs_update_inode_page(inode);
Jaegeuk Kimabb23662014-01-28 12:25:06 +09002377
Chao Yud2600812017-02-08 17:39:45 +08002378 /* 3: update and set xattr node page dirty */
Yunlei He87905682017-07-18 09:48:12 +08002379 memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
Chao Yud2600812017-02-08 17:39:45 +08002380
Chao Yud2600812017-02-08 17:39:45 +08002381 set_page_dirty(xpage);
2382 f2fs_put_page(xpage, 1);
Jaegeuk Kimabb23662014-01-28 12:25:06 +09002383
Chao Yud2600812017-02-08 17:39:45 +08002384 return 0;
Jaegeuk Kimabb23662014-01-28 12:25:06 +09002385}
2386
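/*
 * Roll-forward recovery: recreate the inode page of @ino from the
 * fsynced node page @page. Only the static part of the inode (up to
 * i_ext) plus selected extra attributes are copied; size, block count
 * and link count are reset as for a freshly created inode.
 */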
Chao Yu4d57b862018-05-30 00:20:41 +08002387int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002388{
Jaegeuk Kim58bfaf42013-12-26 16:30:41 +09002389 struct f2fs_inode *src, *dst;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002390 nid_t ino = ino_of_node(page);
2391 struct node_info old_ni, new_ni;
2392 struct page *ipage;
Chao Yu77357302018-07-17 00:02:17 +08002393 int err;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002394
Chao Yu77357302018-07-17 00:02:17 +08002395 err = f2fs_get_node_info(sbi, ino, &old_ni);
2396 if (err)
2397 return err;
Jaegeuk Kime8271fa2014-04-18 15:21:04 +09002398
2399 if (unlikely(old_ni.blk_addr != NULL_ADDR))
2400 return -EINVAL;
Jaegeuk Kime8ea9b32016-09-09 16:59:39 -07002401retry:
Jaegeuk Kim300e1292016-04-29 16:11:53 -07002402 ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
Jaegeuk Kime8ea9b32016-09-09 16:59:39 -07002403 if (!ipage) {
2404 congestion_wait(BLK_RW_ASYNC, HZ/50);
2405 goto retry;
2406 }
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002407
arter97e1c42042014-08-06 23:22:50 +09002408	/* Should not reuse this ino from the free nid list */
Chao Yub8559dc2016-10-12 19:28:29 +08002409 remove_free_nid(sbi, ino);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002410
Jaegeuk Kim237c0792016-06-30 18:49:15 -07002411 if (!PageUptodate(ipage))
2412 SetPageUptodate(ipage);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002413 fill_node_footer(ipage, ino, ino, 0, true);
Chao Yuc5667572018-03-09 14:24:22 +08002414 set_cold_node(page, false);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002415
Jaegeuk Kim58bfaf42013-12-26 16:30:41 +09002416 src = F2FS_INODE(page);
2417 dst = F2FS_INODE(ipage);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002418
Jaegeuk Kim58bfaf42013-12-26 16:30:41 +09002419 memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
2420 dst->i_size = 0;
2421 dst->i_blocks = cpu_to_le64(1);
2422 dst->i_links = cpu_to_le32(1);
2423 dst->i_xattr_nid = 0;
Chao Yu7a2af762017-07-19 00:19:06 +08002424 dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
Chao Yu5c571322017-07-26 00:01:41 +08002425 if (dst->i_inline & F2FS_EXTRA_ATTR) {
Chao Yu7a2af762017-07-19 00:19:06 +08002426 dst->i_extra_isize = src->i_extra_isize;
Chao Yu6afc6622017-09-06 21:59:50 +08002427
2428 if (f2fs_sb_has_flexible_inline_xattr(sbi->sb) &&
2429 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2430 i_inline_xattr_size))
2431 dst->i_inline_xattr_size = src->i_inline_xattr_size;
2432
Chao Yu5c571322017-07-26 00:01:41 +08002433 if (f2fs_sb_has_project_quota(sbi->sb) &&
2434 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2435 i_projid))
2436 dst->i_projid = src->i_projid;
2437 }
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002438
2439 new_ni = old_ni;
2440 new_ni.ino = ino;
2441
Chao Yu0abd6752017-07-09 00:13:07 +08002442 if (unlikely(inc_valid_node_count(sbi, NULL, true)))
Jaegeuk Kim65e5cd02013-05-14 15:47:43 +09002443 WARN_ON(1);
Jaegeuk Kim479f40c2014-03-20 21:52:53 +09002444 set_node_addr(sbi, &new_ni, NEW_ADDR, false);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002445 inc_valid_inode_count(sbi);
Jaegeuk Kim617deb82014-08-07 17:04:24 -07002446 set_page_dirty(ipage);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09002447 f2fs_put_page(ipage, 1);
2448 return 0;
2449}
2450
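/*
 * Rebuild the node summary block for @segno by reading every block in
 * the segment and taking the nid from each node footer; version and
 * ofs_in_node are simply zeroed.
 */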
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
                        unsigned int segno, struct f2fs_summary_block *sum)
{
        struct f2fs_node *rn;
        struct f2fs_summary *sum_entry;
        block_t addr;
        int i, idx, last_offset, nrpages;

        /* scan the node segment */
        last_offset = sbi->blocks_per_seg;
        addr = START_BLOCK(sbi, segno);
        sum_entry = &sum->entries[0];

        for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
                nrpages = min(last_offset - i, BIO_MAX_PAGES);

                /* readahead node pages */
                f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);

                for (idx = addr; idx < addr + nrpages; idx++) {
                        struct page *page = f2fs_get_tmp_page(sbi, idx);

                        if (IS_ERR(page))
                                return PTR_ERR(page);

                        rn = F2FS_NODE(page);
                        sum_entry->nid = rn->footer.nid;
                        sum_entry->version = 0;
                        sum_entry->ofs_in_node = 0;
                        sum_entry++;
                        f2fs_put_page(page, 1);
                }

                invalidate_mapping_pages(META_MAPPING(sbi), addr,
                                                        addr + nrpages);
        }
        return 0;
}

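/*
 * Drain every NAT entry from the journal in the current hot data summary
 * block into the in-memory NAT cache and mark it dirty, so that the next
 * flush rewrites it either into the journal or into its NAT block.
 */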
static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
        struct f2fs_journal *journal = curseg->journal;
        int i;

        down_write(&curseg->journal_rwsem);
        for (i = 0; i < nats_in_cursum(journal); i++) {
                struct nat_entry *ne;
                struct f2fs_nat_entry raw_ne;
                nid_t nid = le32_to_cpu(nid_in_journal(journal, i));

                raw_ne = nat_in_journal(journal, i);

                ne = __lookup_nat_cache(nm_i, nid);
                if (!ne) {
                        ne = __alloc_nat_entry(nid, true);
                        __init_nat_entry(nm_i, ne, &raw_ne, true);
                }

                /*
                 * If a free nat in the journal has not been used since the
                 * last checkpoint, remove it from the available nids now,
                 * since it will be added back when the entry is flushed.
                 */
                if (!get_nat_flag(ne, IS_DIRTY) &&
                                le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
                        spin_lock(&nm_i->nid_list_lock);
                        nm_i->available_nids--;
                        spin_unlock(&nm_i->nid_list_lock);
                }

                __set_nat_cache_dirty(nm_i, ne);
        }
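        /* all journaled entries are now cached and dirty; empty the journal */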
        update_nats_in_cursum(journal, -i);
        up_write(&curseg->journal_rwsem);
}

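/*
 * Insert @nes into @head in ascending order of entry_cnt; sets holding at
 * least @max entries go straight to the tail.  Flushing the smallest sets
 * first lets as many of them as possible be served from the journal.
 */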
static void __adjust_nat_entry_set(struct nat_entry_set *nes,
                                        struct list_head *head, int max)
{
        struct nat_entry_set *cur;

        if (nes->entry_cnt >= max)
                goto add_out;

        list_for_each_entry(cur, head, set_list) {
                if (cur->entry_cnt >= nes->entry_cnt) {
                        list_add(&nes->set_list, cur->set_list.prev);
                        return;
                }
        }
add_out:
        list_add_tail(&nes->set_list, head);
}

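/*
 * Recount the in-use entries of the NAT block being written back and
 * update the nat_bits: a block can be marked full, empty, or neither
 * (both bits clear).
 */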
static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
                                                struct page *page)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
        struct f2fs_nat_block *nat_blk = page_address(page);
        int valid = 0;
        int i = 0;

        if (!enabled_nat_bits(sbi, NULL))
                return;

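        /*
         * nid 0 is reserved and never allocated, so its entry always stays
         * NULL_ADDR; count it as valid so NAT block 0 is never marked empty.
         */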
        if (nat_index == 0) {
                valid = 1;
                i = 1;
        }
        for (; i < NAT_ENTRY_PER_BLOCK; i++) {
                if (nat_blk->entries[i].block_addr != NULL_ADDR)
                        valid++;
        }
        if (valid == 0) {
                __set_bit_le(nat_index, nm_i->empty_nat_bits);
                __clear_bit_le(nat_index, nm_i->full_nat_bits);
                return;
        }

        __clear_bit_le(nat_index, nm_i->empty_nat_bits);
        if (valid == NAT_ENTRY_PER_BLOCK)
                __set_bit_le(nat_index, nm_i->full_nat_bits);
        else
                __clear_bit_le(nat_index, nm_i->full_nat_bits);
}

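/*
 * Write one set's dirty NAT entries back, either into the journal (when
 * it has room and nat_bits is disabled) or into the set's on-disk NAT
 * block, updating the free nid state for every flushed entry.
 */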
static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
                struct nat_entry_set *set, struct cp_control *cpc)
{
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
        struct f2fs_journal *journal = curseg->journal;
        nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
        bool to_journal = true;
        struct f2fs_nat_block *nat_blk;
        struct nat_entry *ne, *cur;
        struct page *page = NULL;

        /*
         * there are two places a dirty nat entry can be flushed to:
         * #1, the nat journal in the current hot data summary block.
         * #2, the entry's on-disk nat page.
         * the journal is used only when it has enough space and nat_bits
         * is disabled.
         */
        if (enabled_nat_bits(sbi, cpc) ||
                !__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
                to_journal = false;

        if (to_journal) {
                down_write(&curseg->journal_rwsem);
        } else {
                page = get_next_nat_page(sbi, start_nid);
                nat_blk = page_address(page);
                f2fs_bug_on(sbi, !nat_blk);
        }

        /* flush dirty nats in nat entry set */
        list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
                struct f2fs_nat_entry *raw_ne;
                nid_t nid = nat_get_nid(ne);
                int offset;

                f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);

                if (to_journal) {
                        offset = f2fs_lookup_journal_in_cursum(journal,
                                                        NAT_JOURNAL, nid, 1);
                        f2fs_bug_on(sbi, offset < 0);
                        raw_ne = &nat_in_journal(journal, offset);
                        nid_in_journal(journal, offset) = cpu_to_le32(nid);
                } else {
                        raw_ne = &nat_blk->entries[nid - start_nid];
                }
                raw_nat_from_node_info(raw_ne, &ne->ni);
                nat_reset_flag(ne);
                __clear_nat_cache_dirty(NM_I(sbi), set, ne);
                if (nat_get_blkaddr(ne) == NULL_ADDR) {
                        add_free_nid(sbi, nid, false, true);
                } else {
                        spin_lock(&NM_I(sbi)->nid_list_lock);
                        update_free_nid_bitmap(sbi, nid, false, false);
                        spin_unlock(&NM_I(sbi)->nid_list_lock);
                }
        }

        if (to_journal) {
                up_write(&curseg->journal_rwsem);
        } else {
                __update_nat_bits(sbi, start_nid, page);
                f2fs_put_page(page, 1);
        }

        /*
         * Node block allocation in write_begin may redirty nat entries,
         * so only release the set once it is truly empty.
         */
        if (!set->entry_cnt) {
                radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
                kmem_cache_free(nat_entry_set_slab, set);
        }
}

/*
 * Flush all dirty NAT entries to the journal or to their NAT blocks.
 * This is called during the checkpointing process (f2fs_write_checkpoint()).
 */
void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
        struct f2fs_journal *journal = curseg->journal;
        struct nat_entry_set *setvec[SETVEC_SIZE];
        struct nat_entry_set *set, *tmp;
        unsigned int found;
        nid_t set_idx = 0;
        LIST_HEAD(sets);

        /* during unmount, flush nat_bits before checking dirty_nat_cnt */
        if (enabled_nat_bits(sbi, cpc)) {
                down_write(&nm_i->nat_tree_lock);
                remove_nats_in_journal(sbi);
                up_write(&nm_i->nat_tree_lock);
        }

        if (!nm_i->dirty_nat_cnt)
                return;

        down_write(&nm_i->nat_tree_lock);

        /*
         * if there is not enough space in the journal to store all the
         * dirty nat entries, remove them all from the journal and merge
         * them into the nat entry sets.
         */
        if (enabled_nat_bits(sbi, cpc) ||
                !__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
                remove_nats_in_journal(sbi);

        while ((found = __gang_lookup_nat_set(nm_i,
                                        set_idx, SETVEC_SIZE, setvec))) {
                unsigned idx;
                set_idx = setvec[found - 1]->set + 1;
                for (idx = 0; idx < found; idx++)
                        __adjust_nat_entry_set(setvec[idx], &sets,
                                                MAX_NAT_JENTRIES(journal));
        }

        /* flush dirty nats in nat entry set */
        list_for_each_entry_safe(set, tmp, &sets, set_list)
                __flush_nat_entry_set(sbi, set, cpc);

        up_write(&nm_i->nat_tree_lock);
        /* dirty nats added by node block allocation in write_begin may remain */
}

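/*
 * Layout of the nat_bits area, stored in the last nat_bits_blocks blocks
 * of the checkpoint segment (as read back below):
 *
 *	+-------------+------------------------+------------------------+
 *	| cp_ver (8B) | full_nat_bits          | empty_nat_bits         |
 *	+-------------+------------------------+------------------------+
 *	                one bit per NAT block    one bit per NAT block
 *
 * The copy is discarded if the embedded version does not match the
 * current checkpoint version.
 */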
static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
        unsigned int i;
        __u64 cp_ver = cur_cp_version(ckpt);
        block_t nat_bits_addr;

        if (!enabled_nat_bits(sbi, NULL))
                return 0;

        nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
        nm_i->nat_bits = f2fs_kzalloc(sbi,
                        nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
        if (!nm_i->nat_bits)
                return -ENOMEM;

        nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
                                                nm_i->nat_bits_blocks;
        for (i = 0; i < nm_i->nat_bits_blocks; i++) {
                struct page *page;

                page = f2fs_get_meta_page(sbi, nat_bits_addr++);
                if (IS_ERR(page)) {
                        disable_nat_bits(sbi, true);
                        return PTR_ERR(page);
                }

                memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
                                        page_address(page), F2FS_BLKSIZE);
                f2fs_put_page(page, 1);
        }

        cp_ver |= (cur_cp_crc(ckpt) << 32);
        if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
                disable_nat_bits(sbi, true);
                return 0;
        }

        nm_i->full_nat_bits = nm_i->nat_bits + 8;
        nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;

        f2fs_msg(sbi->sb, KERN_NOTICE, "Found nat_bits in checkpoint");
        return 0;
}

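/*
 * Prime the free nid bitmaps from nat_bits: every nid in an empty NAT
 * block is marked free, and both empty and full blocks are flagged as
 * already loaded in nat_block_bitmap so they are not scanned again.
 */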
static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        unsigned int i = 0;
        nid_t nid, last_nid;

        if (!enabled_nat_bits(sbi, NULL))
                return;

        for (i = 0; i < nm_i->nat_blocks; i++) {
                i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
                if (i >= nm_i->nat_blocks)
                        break;

                __set_bit_le(i, nm_i->nat_block_bitmap);

                nid = i * NAT_ENTRY_PER_BLOCK;
                last_nid = nid + NAT_ENTRY_PER_BLOCK;

                spin_lock(&NM_I(sbi)->nid_list_lock);
                for (; nid < last_nid; nid++)
                        update_free_nid_bitmap(sbi, nid, true, true);
                spin_unlock(&NM_I(sbi)->nid_list_lock);
        }

        for (i = 0; i < nm_i->nat_blocks; i++) {
                i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
                if (i >= nm_i->nat_blocks)
                        break;

                __set_bit_le(i, nm_i->nat_block_bitmap);
        }
}

static int init_node_manager(struct f2fs_sb_info *sbi)
{
        struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        unsigned char *version_bitmap;
        unsigned int nat_segs;
        int err;

        nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

        /* segment_count_nat includes the pair segment, so divide by 2 */
        nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
        nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
        nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;

        /*
         * unusable nids: 0, node, meta (root is already counted as a
         * valid node)
         */
        nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
                                sbi->nquota_files - F2FS_RESERVED_NODE_NUM;
        nm_i->nid_cnt[FREE_NID] = 0;
        nm_i->nid_cnt[PREALLOC_NID] = 0;
        nm_i->nat_cnt = 0;
        nm_i->ram_thresh = DEF_RAM_THRESHOLD;
        nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
        nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;

        INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
        INIT_LIST_HEAD(&nm_i->free_nid_list);
        INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
        INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
        INIT_LIST_HEAD(&nm_i->nat_entries);

        mutex_init(&nm_i->build_lock);
        spin_lock_init(&nm_i->nid_list_lock);
        init_rwsem(&nm_i->nat_tree_lock);

        nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
        nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
        version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
        if (!version_bitmap)
                return -EFAULT;

        nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
                                        GFP_KERNEL);
        if (!nm_i->nat_bitmap)
                return -ENOMEM;

        err = __get_nat_bitmaps(sbi);
        if (err)
                return err;

#ifdef CONFIG_F2FS_CHECK_FS
        nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
                                        GFP_KERNEL);
        if (!nm_i->nat_bitmap_mir)
                return -ENOMEM;
#endif

        return 0;
}

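/*
 * Allocate the free nid tracking state: a per-NAT-block bitmap of free
 * nids, a bitmap of NAT blocks whose state has been loaded, and a
 * per-block count of free nids.
 */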
static int init_free_nid_cache(struct f2fs_sb_info *sbi)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        int i;

        nm_i->free_nid_bitmap =
                f2fs_kzalloc(sbi, array_size(sizeof(unsigned char *),
                                             nm_i->nat_blocks),
                             GFP_KERNEL);
        if (!nm_i->free_nid_bitmap)
                return -ENOMEM;

        for (i = 0; i < nm_i->nat_blocks; i++) {
                nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
                        f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
                if (!nm_i->free_nid_bitmap[i])
                        return -ENOMEM;
        }

        nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
                                                                GFP_KERNEL);
        if (!nm_i->nat_block_bitmap)
                return -ENOMEM;

        nm_i->free_nid_count =
                f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
                                              nm_i->nat_blocks),
                              GFP_KERNEL);
        if (!nm_i->free_nid_count)
                return -ENOMEM;
        return 0;
}

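/*
 * A rough sketch of the mount-time call path (the fill_super caller is
 * an assumption from the wider f2fs code, not shown in this file):
 *
 *	f2fs_fill_super()
 *		-> f2fs_build_node_manager(sbi)
 *			-> init_node_manager()
 *			-> init_free_nid_cache()
 *			-> load_free_nid_bitmap()
 *			-> f2fs_build_free_nids()
 */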
int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
{
        int err;

        sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
                                                        GFP_KERNEL);
        if (!sbi->nm_info)
                return -ENOMEM;

        err = init_node_manager(sbi);
        if (err)
                return err;

        err = init_free_nid_cache(sbi);
        if (err)
                return err;

        /* load free nid status from nat_bits table */
        load_free_nid_bitmap(sbi);

        return f2fs_build_free_nids(sbi, true, true);
}

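/*
 * Tear down everything f2fs_build_node_manager() set up: the free nid
 * list, the nat cache, the nat set cache, and finally the bitmaps.
 */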
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct free_nid *i, *next_i;
        struct nat_entry *natvec[NATVEC_SIZE];
        struct nat_entry_set *setvec[SETVEC_SIZE];
        nid_t nid = 0;
        unsigned int found;

        if (!nm_i)
                return;

        /* destroy free nid list */
        spin_lock(&nm_i->nid_list_lock);
        list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
                __remove_free_nid(sbi, i, FREE_NID);
                spin_unlock(&nm_i->nid_list_lock);
                kmem_cache_free(free_nid_slab, i);
                spin_lock(&nm_i->nid_list_lock);
        }
        f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
        f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
        f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
        spin_unlock(&nm_i->nid_list_lock);

        /* destroy nat cache */
        down_write(&nm_i->nat_tree_lock);
        while ((found = __gang_lookup_nat_cache(nm_i,
                        nid, NATVEC_SIZE, natvec))) {
                unsigned idx;

                nid = nat_get_nid(natvec[found - 1]) + 1;
                for (idx = 0; idx < found; idx++)
                        __del_from_nat_cache(nm_i, natvec[idx]);
        }
        f2fs_bug_on(sbi, nm_i->nat_cnt);

        /* destroy nat set cache */
        nid = 0;
        while ((found = __gang_lookup_nat_set(nm_i,
                        nid, SETVEC_SIZE, setvec))) {
                unsigned idx;

                nid = setvec[found - 1]->set + 1;
                for (idx = 0; idx < found; idx++) {
                        /* entry_cnt can be nonzero if a checkpoint error occurred */
                        f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
                        radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
                        kmem_cache_free(nat_entry_set_slab, setvec[idx]);
                }
        }
        up_write(&nm_i->nat_tree_lock);

        kvfree(nm_i->nat_block_bitmap);
        if (nm_i->free_nid_bitmap) {
                int i;

                for (i = 0; i < nm_i->nat_blocks; i++)
                        kvfree(nm_i->free_nid_bitmap[i]);
                kfree(nm_i->free_nid_bitmap);
        }
        kvfree(nm_i->free_nid_count);

        kfree(nm_i->nat_bitmap);
        kfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
        kfree(nm_i->nat_bitmap_mir);
#endif
        sbi->nm_info = NULL;
        kfree(nm_i);
}

int __init f2fs_create_node_manager_caches(void)
{
        nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
                        sizeof(struct nat_entry));
        if (!nat_entry_slab)
                goto fail;

        free_nid_slab = f2fs_kmem_cache_create("free_nid",
                        sizeof(struct free_nid));
        if (!free_nid_slab)
                goto destroy_nat_entry;

        nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
                        sizeof(struct nat_entry_set));
        if (!nat_entry_set_slab)
                goto destroy_free_nid;
        return 0;

destroy_free_nid:
        kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
        kmem_cache_destroy(nat_entry_slab);
fail:
        return -ENOMEM;
}

void f2fs_destroy_node_manager_caches(void)
{
        kmem_cache_destroy(nat_entry_set_slab);
        kmem_cache_destroy(free_nid_slab);
        kmem_cache_destroy(nat_entry_slab);
}