// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <asm/unaligned.h>
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;

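/*
 * Check that the blocks allocated since the last checkpoint still fit
 * within the user-visible block count, i.e. that there is room to roll
 * forward over the fsynced data.
 */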
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}

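/* Find the fsync_inode_entry for @ino in @head, or NULL if none exists. */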
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

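/*
 * Grab a reference to inode @ino, initialize its quota state and append
 * a new fsync_inode_entry for it to @head. Returns the new entry on
 * success or an ERR_PTR() on failure.
 */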
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}

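/*
 * Drop one entry from a recovery list. When @drop is set, the inode is
 * marked as already synced first so that iput() does not write it back.
 */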
static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
{
	if (drop) {
		/* inode should not be recovered, drop it */
		f2fs_inode_synced(entry->inode);
	}
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}

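/*
 * Reconstruct an f2fs_filename from the name stored in the raw inode so
 * the dentry can be looked up and re-inserted. For encrypted+casefolded
 * directories the hash is read back from disk, since it cannot be
 * recomputed without the key.
 */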
static int init_recovered_filename(const struct inode *dir,
				   struct f2fs_inode *raw_inode,
				   struct f2fs_filename *fname,
				   struct qstr *usr_fname)
{
	int err;

	memset(fname, 0, sizeof(*fname));
	fname->disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname->disk_name.name = raw_inode->i_name;

	if (WARN_ON(fname->disk_name.len > F2FS_NAME_LEN))
		return -ENAMETOOLONG;

	if (!IS_ENCRYPTED(dir)) {
		usr_fname->name = fname->disk_name.name;
		usr_fname->len = fname->disk_name.len;
		fname->usr_fname = usr_fname;
	}

	/* Compute the hash of the filename */
	if (IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir)) {
		/*
		 * In this case the hash isn't computable without the key, so it
		 * was saved on-disk.
		 */
		if (fname->disk_name.len + sizeof(f2fs_hash_t) > F2FS_NAME_LEN)
			return -EINVAL;
		fname->hash = get_unaligned((f2fs_hash_t *)
				&raw_inode->i_name[fname->disk_name.len]);
	} else if (IS_CASEFOLDED(dir)) {
		err = f2fs_init_casefolded_name(dir, fname);
		if (err)
			return err;
		f2fs_hash_filename(dir, fname);
#ifdef CONFIG_UNICODE
		/* Case-sensitive match is fine for recovery */
		kfree(fname->cf_name.name);
		fname->cf_name.name = NULL;
#endif
	} else {
		f2fs_hash_filename(dir, fname);
	}
	return 0;
}

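/*
 * Re-link @inode into its parent directory: look up (or load) the parent
 * through @dir_list, delete any stale dentry that still occupies the
 * recovered name, and then add the dentry back.
 */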
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct f2fs_filename fname;
	struct qstr usr_fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;
	err = init_recovered_filename(dir, raw_inode, &fname, &usr_fname);
	if (err)
		goto out;
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_put;
		}

		err = dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_put;
		}

		err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = f2fs_add_dentry(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_put:
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d",
		    __func__, ino_of_node(ipage), name,
		    IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

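/*
 * If the on-disk uid/gid differ from the in-memory inode, transfer the
 * quota charges to the recovered owner before the ids are overwritten.
 */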
static int recover_quota_data(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	struct iattr attr;
	uid_t i_uid = le32_to_cpu(raw->i_uid);
	gid_t i_gid = le32_to_cpu(raw->i_gid);
	int err;

	memset(&attr, 0, sizeof(attr));

	attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
	attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);

	if (!uid_eq(attr.ia_uid, inode->i_uid))
		attr.ia_valid |= ATTR_UID;
	if (!gid_eq(attr.ia_gid, inode->i_gid))
		attr.ia_valid |= ATTR_GID;

	if (!attr.ia_valid)
		return 0;

	err = dquot_transfer(inode, &attr);
	if (err)
		set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
	return err;
}

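/* Mirror the pin-file and data-exist bits from the raw on-disk inode. */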
static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_PIN_FILE)
		set_inode_flag(inode, FI_PIN_FILE);
	else
		clear_inode_flag(inode, FI_PIN_FILE);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_inode_flag(inode, FI_DATA_EXIST);
	else
		clear_inode_flag(inode, FI_DATA_EXIST);
}

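/*
 * Copy the latest metadata (mode, ownership, project id, size,
 * timestamps and flags) from the fsynced node page back into the
 * in-memory inode.
 */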
static int recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;
	int err;

	inode->i_mode = le16_to_cpu(raw->i_mode);

	err = recover_quota_data(inode, page);
	if (err)
		return err;

	i_uid_write(inode, le32_to_cpu(raw->i_uid));
	i_gid_write(inode, le32_to_cpu(raw->i_gid));

	if (raw->i_inline & F2FS_EXTRA_ATTR) {
		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
								i_projid)) {
			projid_t i_projid;
			kprojid_t kprojid;

			i_projid = (projid_t)le32_to_cpu(raw->i_projid);
			kprojid = make_kprojid(&init_user_ns, i_projid);

			if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
				err = f2fs_transfer_project_quota(inode,
								kprojid);
				if (err)
					return err;
				F2FS_I(inode)->i_projid = kprojid;
			}
		}
	}

	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;
	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
	f2fs_set_inode_flags(inode);
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
				le16_to_cpu(raw->i_gc_failures);

	recover_inline_flags(inode, raw);

	f2fs_mark_inode_dirty_sync(inode, true);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x",
		    ino_of_node(page), name, raw->i_inline);
	return 0;
}

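/*
 * Step #1 of roll forward recovery: walk the warm node chain written
 * after the last checkpoint and collect every inode that has fsync'd
 * dnodes into @head. The loop_cnt/free_blocks check guards against a
 * looped node chain.
 */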
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	unsigned int loop_cnt = 0;
	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
						valid_user_blocks(sbi);
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = f2fs_recover_inode_page(sbi, page);
				if (err) {
					f2fs_put_page(page, 1);
					break;
				}
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				f2fs_put_page(page, 1);
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* sanity check in order to detect looped node chain */
		if (++loop_cnt >= free_blocks ||
			blkaddr == next_blkaddr_of_node(page)) {
			f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u",
				    __func__, blkaddr,
				    next_blkaddr_of_node(page));
			f2fs_put_page(page, 1);
			err = -EINVAL;
			break;
		}

		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		f2fs_ra_meta_pages_cond(sbi, blkaddr);
	}
	return err;
}

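/* Tear down a list built by find_fsync_dnodes(). */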
static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry, drop);
}

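/*
 * @blkaddr was valid before this recovery step; use the segment summary
 * to find the node that previously owned it and truncate that index, so
 * the block can be safely reused for @dn.
 */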
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = f2fs_get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return PTR_ERR(sum_page);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = f2fs_start_bidx_of_node(offset, inode) +
				le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (f2fs_data_blkaddr(&tdn) == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

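/*
 * Replay one fsynced node page onto @inode: recover xattr and inline
 * data first, then walk the block addresses it covers and replace each
 * stale source block with its recovered destination.
 */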
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		err = f2fs_recover_inline_xattr(inode, page);
		if (err)
			goto out;
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = f2fs_recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	err = f2fs_recover_inline_data(inode, page);
	if (err) {
		if (err == 1)
			err = 0;
		goto out;
	}

	/* step 3: recover data indices */
	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	err = f2fs_get_node_info(sbi, dn.nid, &ni);
	if (err)
		goto err;

	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));

	if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
		f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
			  inode->i_ino, ofs_of_node(dn.node_page),
			  ofs_of_node(page));
		err = -EFSCORRUPTED;
		goto err;
	}

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = f2fs_data_blkaddr(&dn);
		dest = data_blkaddr(dn.inode, page, dn.ofs_in_node);

		if (__is_valid_data_blkaddr(src) &&
			!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
			err = -EFSCORRUPTED;
			goto err;
		}

		if (__is_valid_data_blkaddr(dest) &&
			!f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
			err = -EFSCORRUPTED;
			goto err;
		}

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			f2fs_reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = f2fs_reserve_new_block(&dn);
				while (err &&
				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
					err = f2fs_reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC,
							DEFAULT_IO_TIMEOUT);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		    inode->i_ino, file_keep_isize(inode) ? "keep" : "recover",
		    recovered, err);
	return err;
}

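/*
 * Step #2 of roll forward recovery: walk the node chain again and, for
 * each collected inode, replay the inode metadata, the dentry and the
 * data block updates carried by each fsynced node page.
 */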
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
		struct list_head *tmp_inode_list, struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		f2fs_ra_meta_pages_cond(sbi, blkaddr);

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page)) {
			err = recover_inode(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			list_move_tail(&entry->list, tmp_inode_list);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		f2fs_allocate_new_segments(sbi);
	return err;
}

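/*
 * Entry point for roll forward recovery, or a dry run when @check_only
 * is set. Enables quotas, scans for fsynced dnodes, replays them, and
 * finally writes a CP_RECOVERY checkpoint.
 */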
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list, tmp_inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
	bool fix_curseg_write_pointer = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & SB_RDONLY) {
		f2fs_info(sbi, "recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&tmp_inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	down_write(&sbi->cp_global_sem);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
	else {
		/* restore s_flags to let iput() trash data */
		sbi->sb->s_flags = s_flags;
	}
skip:
	fix_curseg_write_pointer = !check_only || list_empty(&inode_list);

	destroy_fsync_dnodes(&inode_list, err);
	destroy_fsync_dnodes(&tmp_inode_list, err);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	/*
	 * If fsync data succeeds or there is no fsync data to recover,
	 * and the f2fs is not read only, check and fix zoned block devices'
	 * write pointer consistency.
	 */
	if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
			f2fs_sb_has_blkzoned(sbi)) {
		err = f2fs_fix_curseg_write_pointer(sbi);
		ret = err;
	}

	if (!err)
		clear_sbi_flag(sbi, SBI_POR_DOING);

	up_write(&sbi->cp_global_sem);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list, err);

	if (need_writecp) {
		set_sbi_flag(sbi, SBI_IS_RECOVERED);

		if (!err) {
			struct cp_control cpc = {
				.reason = CP_RECOVERY,
			};
			err = f2fs_write_checkpoint(sbi, &cpc);
		}
	}

	kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return ret ? ret : err;
}
Jaegeuk Kimd624c962012-11-02 17:13:32 +0900879}