// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;

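/*
 * Check whether the blocks allocated since the last checkpoint still fit
 * within the user block count, i.e. whether there is enough space left to
 * perform roll forward recovery safely.
 */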
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}

static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

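/*
 * Look up the inode by @ino and add it to the fsync inode list @head. The
 * inode reference and any quota charges are released again in the error path.
 */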
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}

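/* Drop an entry from the fsync inode list and release its inode reference. */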
static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
{
	if (drop) {
		/* inode should not be recovered, drop it */
		f2fs_inode_synced(entry->inode);
	}
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}

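/*
 * Re-link the recovered inode into its parent directory: remove any stale
 * dentry that points to a different inode, then add a dentry for this inode
 * under the name stored in the raw inode.
 */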
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct fscrypt_name fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;

	memset(&fname, 0, sizeof(struct fscrypt_name));
	fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname.disk_name.name = raw_inode->i_name;

	if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_put;
		}

		err = dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_put;
		}

		err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = f2fs_add_dentry(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_put:
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d",
		    __func__, ino_of_node(ipage), name,
		    IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

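/*
 * Transfer quota charges when the on-disk uid/gid differs from the in-memory
 * inode, so that quota accounting matches the recovered ownership.
 */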
static int recover_quota_data(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	struct iattr attr;
	uid_t i_uid = le32_to_cpu(raw->i_uid);
	gid_t i_gid = le32_to_cpu(raw->i_gid);
	int err;

	memset(&attr, 0, sizeof(attr));

	attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
	attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);

	if (!uid_eq(attr.ia_uid, inode->i_uid))
		attr.ia_valid |= ATTR_UID;
	if (!gid_eq(attr.ia_gid, inode->i_gid))
		attr.ia_valid |= ATTR_GID;

	if (!attr.ia_valid)
		return 0;

	err = dquot_transfer(inode, &attr);
	if (err)
		set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
	return err;
}

static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_PIN_FILE)
		set_inode_flag(inode, FI_PIN_FILE);
	else
		clear_inode_flag(inode, FI_PIN_FILE);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_inode_flag(inode, FI_DATA_EXIST);
	else
		clear_inode_flag(inode, FI_DATA_EXIST);
}

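/*
 * Roll forward the inode metadata (mode, uid/gid, project id, size, times
 * and flags) from the fsync'ed node page into the in-memory inode.
 */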
static int recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;
	int err;

	inode->i_mode = le16_to_cpu(raw->i_mode);

	err = recover_quota_data(inode, page);
	if (err)
		return err;

	i_uid_write(inode, le32_to_cpu(raw->i_uid));
	i_gid_write(inode, le32_to_cpu(raw->i_gid));

	if (raw->i_inline & F2FS_EXTRA_ATTR) {
		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
								i_projid)) {
			projid_t i_projid;
			kprojid_t kprojid;

			i_projid = (projid_t)le32_to_cpu(raw->i_projid);
			kprojid = make_kprojid(&init_user_ns, i_projid);

			if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
				err = f2fs_transfer_project_quota(inode,
								kprojid);
				if (err)
					return err;
				F2FS_I(inode)->i_projid = kprojid;
			}
		}
	}

	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;
	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
	f2fs_set_inode_flags(inode);
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
				le16_to_cpu(raw->i_gc_failures);

	recover_inline_flags(inode, raw);

	f2fs_mark_inode_dirty_sync(inode, true);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x",
		    ino_of_node(page), name, raw->i_inline);
	return 0;
}

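/*
 * Recovery step #1: walk the warm node chain written after the last
 * checkpoint and collect every inode that has a fsync'ed dnode into @head.
 */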
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	unsigned int loop_cnt = 0;
	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
						valid_user_blocks(sbi);
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = f2fs_recover_inode_page(sbi, page);
				if (err) {
					f2fs_put_page(page, 1);
					break;
				}
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				f2fs_put_page(page, 1);
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* sanity check in order to detect looped node chain */
		if (++loop_cnt >= free_blocks ||
			blkaddr == next_blkaddr_of_node(page)) {
			f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u",
				    __func__, blkaddr,
				    next_blkaddr_of_node(page));
			f2fs_put_page(page, 1);
			err = -EINVAL;
			break;
		}

		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		f2fs_ra_meta_pages_cond(sbi, blkaddr);
	}
	return err;
}

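/* Release every entry on an fsync inode list. */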
static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry, drop);
}

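/*
 * The destination block may still be referenced by an older node page.
 * Find that previous owner through the segment summary and truncate its
 * stale index so the block can be reused for recovery.
 */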
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = f2fs_get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return PTR_ERR(sum_page);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = f2fs_start_bidx_of_node(offset, inode) +
				le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.inode, tdn.node_page,
					tdn.ofs_in_node) == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

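/*
 * Recover one fsync'ed node page: restore xattr and inline data first, then
 * replay every data block index it carries into the current dnode.
 */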
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		f2fs_recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = f2fs_recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	if (f2fs_recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	err = f2fs_get_node_info(sbi, dn.nid, &ni);
	if (err)
		goto err;

	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));

	if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
		f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
			  inode->i_ino, ofs_of_node(dn.node_page),
			  ofs_of_node(page));
		err = -EFSCORRUPTED;
		goto err;
	}

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(dn.inode, page, dn.ofs_in_node);

		if (__is_valid_data_blkaddr(src) &&
			!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
			err = -EFSCORRUPTED;
			goto err;
		}

		if (__is_valid_data_blkaddr(dest) &&
			!f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
			err = -EFSCORRUPTED;
			goto err;
		}

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			f2fs_reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = f2fs_reserve_new_block(&dn);
				while (err &&
				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
					err = f2fs_reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		    inode->i_ino, file_keep_isize(inode) ? "keep" : "recover",
		    recovered, err);
	return err;
}

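/*
 * Recovery step #2: walk the warm node chain again and, for each collected
 * inode, replay its inode updates, directory entry and data block indices.
 */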
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
		struct list_head *tmp_inode_list, struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		f2fs_ra_meta_pages_cond(sbi, blkaddr);

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page)) {
			err = recover_inode(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			list_move_tail(&entry->list, tmp_inode_list);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		f2fs_allocate_new_segments(sbi, NO_CHECK_TYPE);
	return err;
}

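/*
 * Entry point for roll forward recovery at mount time. When @check_only is
 * set, only report whether there is any fsync'ed data to recover; otherwise
 * recover it and write a CP_RECOVERY checkpoint.
 */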
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list, tmp_inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
	bool fix_curseg_write_pointer = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & SB_RDONLY) {
		f2fs_info(sbi, "recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&tmp_inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
	else {
		/* restore s_flags to let iput() trash data */
		sbi->sb->s_flags = s_flags;
	}
skip:
	fix_curseg_write_pointer = !check_only || list_empty(&inode_list);

	destroy_fsync_dnodes(&inode_list, err);
	destroy_fsync_dnodes(&tmp_inode_list, err);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	/*
	 * If fsync data succeeds or there is no fsync data to recover,
	 * and the f2fs is not read only, check and fix zoned block devices'
	 * write pointer consistency.
	 */
	if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
			f2fs_sb_has_blkzoned(sbi)) {
		err = f2fs_fix_curseg_write_pointer(sbi);
		ret = err;
	}

	if (!err)
		clear_sbi_flag(sbi, SBI_POR_DOING);

	mutex_unlock(&sbi->cp_mutex);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list, err);

	if (need_writecp) {
		set_sbi_flag(sbi, SBI_IS_RECOVERED);

		if (!err) {
			struct cp_control cpc = {
				.reason = CP_RECOVERY,
			};
			err = f2fs_write_checkpoint(sbi, &cpc);
		}
	}

	kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */

	return ret ? ret : err;
}