// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <asm/unaligned.h>
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;

#ifdef CONFIG_UNICODE
extern struct kmem_cache *f2fs_cf_name_slab;
#endif

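/*
 * Check whether there is enough free space to replay the roll forward
 * log: the blocks allocated since the last checkpoint must still fit
 * within the total user block count.
 */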
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}

static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

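/*
 * Grab a reference on inode @ino, set up its quota state, and track it
 * on @head for recovery.  Returns the new list entry, or an ERR_PTR if
 * the inode cannot be loaded or quota initialization fails.
 */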
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}

static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
{
	if (drop) {
		/* inode should not be recovered, drop it */
		f2fs_inode_synced(entry->inode);
	}
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}

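/*
 * Rebuild an f2fs_filename for the name stored in the raw inode so the
 * dentry can be looked up again.  The name hash is recomputed, except
 * for encrypted+casefolded directories where it was saved on-disk.
 */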
static int init_recovered_filename(const struct inode *dir,
				   struct f2fs_inode *raw_inode,
				   struct f2fs_filename *fname,
				   struct qstr *usr_fname)
{
	int err;

	memset(fname, 0, sizeof(*fname));
	fname->disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname->disk_name.name = raw_inode->i_name;

	if (WARN_ON(fname->disk_name.len > F2FS_NAME_LEN))
		return -ENAMETOOLONG;

	if (!IS_ENCRYPTED(dir)) {
		usr_fname->name = fname->disk_name.name;
		usr_fname->len = fname->disk_name.len;
		fname->usr_fname = usr_fname;
	}

	/* Compute the hash of the filename */
	if (IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir)) {
		/*
		 * In this case the hash isn't computable without the key, so it
		 * was saved on-disk.
		 */
		if (fname->disk_name.len + sizeof(f2fs_hash_t) > F2FS_NAME_LEN)
			return -EINVAL;
		fname->hash = get_unaligned((f2fs_hash_t *)
				&raw_inode->i_name[fname->disk_name.len]);
	} else if (IS_CASEFOLDED(dir)) {
		err = f2fs_init_casefolded_name(dir, fname);
		if (err)
			return err;
		f2fs_hash_filename(dir, fname);
#ifdef CONFIG_UNICODE
		/* Case-sensitive match is fine for recovery */
		kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
		fname->cf_name.name = NULL;
#endif
	} else {
		f2fs_hash_filename(dir, fname);
	}
	return 0;
}

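/*
 * Re-link @inode into its parent directory using the name recorded in
 * the node page @ipage.  If a stale dentry with the same name points at
 * a different inode, the old entry is deleted and the lookup retried.
 */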
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct f2fs_filename fname;
	struct qstr usr_fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;
	err = init_recovered_filename(dir, raw_inode, &fname, &usr_fname);
	if (err)
		goto out;
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_put;
		}

		err = dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_put;
		}

		err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = f2fs_add_dentry(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_put:
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d",
		    __func__, ino_of_node(ipage), name,
		    IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

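/*
 * If the on-disk uid/gid differ from the in-memory inode, transfer the
 * quota charges to the recovered owner before the ids are rewritten.
 */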
static int recover_quota_data(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	struct iattr attr;
	uid_t i_uid = le32_to_cpu(raw->i_uid);
	gid_t i_gid = le32_to_cpu(raw->i_gid);
	int err;

	memset(&attr, 0, sizeof(attr));

	attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
	attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);

	if (!uid_eq(attr.ia_uid, inode->i_uid))
		attr.ia_valid |= ATTR_UID;
	if (!gid_eq(attr.ia_gid, inode->i_gid))
		attr.ia_valid |= ATTR_GID;

	if (!attr.ia_valid)
		return 0;

	err = dquot_transfer(inode, &attr);
	if (err)
		set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
	return err;
}

static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_PIN_FILE)
		set_inode_flag(inode, FI_PIN_FILE);
	else
		clear_inode_flag(inode, FI_PIN_FILE);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_inode_flag(inode, FI_DATA_EXIST);
	else
		clear_inode_flag(inode, FI_DATA_EXIST);
}

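/*
 * Restore inode metadata (mode, ownership, project id, size, timestamps
 * and flags) from the on-disk inode in @page, and mark the in-memory
 * inode dirty so the following checkpoint writes it back.
 */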
static int recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;
	int err;

	inode->i_mode = le16_to_cpu(raw->i_mode);

	err = recover_quota_data(inode, page);
	if (err)
		return err;

	i_uid_write(inode, le32_to_cpu(raw->i_uid));
	i_gid_write(inode, le32_to_cpu(raw->i_gid));

	if (raw->i_inline & F2FS_EXTRA_ATTR) {
		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
								i_projid)) {
			projid_t i_projid;
			kprojid_t kprojid;

			i_projid = (projid_t)le32_to_cpu(raw->i_projid);
			kprojid = make_kprojid(&init_user_ns, i_projid);

			if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
				err = f2fs_transfer_project_quota(inode,
								kprojid);
				if (err)
					return err;
				F2FS_I(inode)->i_projid = kprojid;
			}
		}
	}

	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;
	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
	f2fs_set_inode_flags(inode);
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
				le16_to_cpu(raw->i_gc_failures);

	recover_inline_flags(inode, raw);

	f2fs_mark_inode_dirty_sync(inode, true);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x",
		    ino_of_node(page), name, raw->i_inline);
	return 0;
}

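/*
 * Recovery step #1: walk the warm node chain starting right after the
 * last checkpointed position, and collect every inode that has an
 * fsynced dnode into @head.  The walk is bounded by the free block
 * count so a looped node chain is detected instead of spinning forever.
 */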
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	unsigned int loop_cnt = 0;
	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
						valid_user_blocks(sbi);
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = f2fs_recover_inode_page(sbi, page);
				if (err) {
					f2fs_put_page(page, 1);
					break;
				}
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				f2fs_put_page(page, 1);
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* sanity check in order to detect looped node chain */
		if (++loop_cnt >= free_blocks ||
			blkaddr == next_blkaddr_of_node(page)) {
			f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u",
				    __func__, blkaddr,
				    next_blkaddr_of_node(page));
			f2fs_put_page(page, 1);
			err = -EINVAL;
			break;
		}

		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		f2fs_ra_meta_pages_cond(sbi, blkaddr);
	}
	return err;
}

static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry, drop);
}

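/*
 * The destination block @blkaddr may still be referenced by an index in
 * an older node.  Find that owner through the segment summary and, if
 * its index still points at @blkaddr, truncate it so the block can be
 * reused by the recovered data.
 */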
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = f2fs_get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return PTR_ERR(sum_page);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = f2fs_start_bidx_of_node(offset, inode) +
				le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (f2fs_data_blkaddr(&tdn) == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

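/*
 * Replay a single fsynced node page: recover any xattr and inline data
 * it carries, then walk its block addresses and make the live dnode
 * point at the same blocks, reserving or truncating blocks as needed.
 */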
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		err = f2fs_recover_inline_xattr(inode, page);
		if (err)
			goto out;
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = f2fs_recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	err = f2fs_recover_inline_data(inode, page);
	if (err) {
		if (err == 1)
			err = 0;
		goto out;
	}

	/* step 3: recover data indices */
	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	err = f2fs_get_node_info(sbi, dn.nid, &ni);
	if (err)
		goto err;

	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));

	if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
		f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
			  inode->i_ino, ofs_of_node(dn.node_page),
			  ofs_of_node(page));
		err = -EFSCORRUPTED;
		goto err;
	}

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = f2fs_data_blkaddr(&dn);
		dest = data_blkaddr(dn.inode, page, dn.ofs_in_node);

		if (__is_valid_data_blkaddr(src) &&
			!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
			err = -EFSCORRUPTED;
			goto err;
		}

		if (__is_valid_data_blkaddr(dest) &&
			!f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
			err = -EFSCORRUPTED;
			goto err;
		}

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			f2fs_reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = f2fs_reserve_new_block(&dn);
				while (err &&
				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
					err = f2fs_reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC,
							DEFAULT_IO_TIMEOUT);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		    inode->i_ino, file_keep_isize(inode) ? "keep" : "recover",
		    recovered, err);
	return err;
}

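/*
 * Recovery step #2: walk the warm node chain a second time and, for
 * each inode collected in step #1, recover the inode itself, its
 * dentry, and its data blocks.  Fully recovered entries are moved to
 * @tmp_inode_list.
 */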
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
		struct list_head *tmp_inode_list, struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		f2fs_ra_meta_pages_cond(sbi, blkaddr);

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page)) {
			err = recover_inode(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			list_move_tail(&entry->list, tmp_inode_list);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		f2fs_allocate_new_segments(sbi);
	return err;
}

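/*
 * Entry point for roll forward recovery, called at mount time.  With
 * @check_only set, only probe for recoverable fsync data and return 1
 * if any was found; otherwise replay it under cp_global_sem and write a
 * CP_RECOVERY checkpoint on success.
 */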
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list, tmp_inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
	bool fix_curseg_write_pointer = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & SB_RDONLY) {
		f2fs_info(sbi, "recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&tmp_inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	down_write(&sbi->cp_global_sem);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
	else {
		/* restore s_flags to let iput() trash data */
		sbi->sb->s_flags = s_flags;
	}
skip:
	fix_curseg_write_pointer = !check_only || list_empty(&inode_list);

	destroy_fsync_dnodes(&inode_list, err);
	destroy_fsync_dnodes(&tmp_inode_list, err);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	/*
	 * If fsync data succeeds or there is no fsync data to recover,
	 * and the f2fs is not read only, check and fix zoned block devices'
	 * write pointer consistency.
	 */
	if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
			f2fs_sb_has_blkzoned(sbi)) {
		err = f2fs_fix_curseg_write_pointer(sbi);
		ret = err;
	}

	if (!err)
		clear_sbi_flag(sbi, SBI_POR_DOING);

	up_write(&sbi->cp_global_sem);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list, err);

	if (need_writecp) {
		set_sbi_flag(sbi, SBI_IS_RECOVERED);

		if (!err) {
			struct cp_control cpc = {
				.reason = CP_RECOVERY,
			};
			err = f2fs_write_checkpoint(sbi, &cpc);
		}
	}

#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */

	return ret ? ret : err;
}

int __init f2fs_create_recovery_cache(void)
{
	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
					sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_recovery_cache(void)
{
	kmem_cache_destroy(fsync_entry_slab);
}