// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x).
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;

bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}

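/*
 * Look up the fsync list entry for @ino; returns NULL if the inode has
 * not been collected yet.
 */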
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
					nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

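/*
 * Grab a reference to inode @ino, charge quota for it, and append a new
 * fsync_inode_entry to @head.  On failure the inode reference is dropped
 * and an ERR_PTR is returned instead.
 */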
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}

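/* Drop the inode reference and free one fsync list entry. */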
static void del_fsync_inode(struct fsync_inode_entry *entry)
{
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}

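/*
 * Re-link @inode into its parent directory under the name recorded in the
 * raw inode on @ipage.  If that name currently points at a different
 * inode, the stale dentry is removed (an orphan slot is reserved first)
 * and the lookup is retried.  Parent dir inodes are cached in @dir_list.
 */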
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct fscrypt_name fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;

	memset(&fname, 0, sizeof(struct fscrypt_name));
	fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname.disk_name.name = raw_inode->i_name;

	if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_put;
		}

		err = dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_put;
		}

		err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = f2fs_add_dentry(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_put:
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

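/*
 * Mirror the pin-file and inline-data-exist bits of the raw inode into
 * the in-memory inode flags.
 */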
static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_PIN_FILE)
		set_inode_flag(inode, FI_PIN_FILE);
	else
		clear_inode_flag(inode, FI_PIN_FILE);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_inode_flag(inode, FI_DATA_EXIST);
	else
		clear_inode_flag(inode, FI_DATA_EXIST);
}

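/*
 * Refresh the in-memory inode from the raw inode carried in the fsynced
 * node @page: mode, uid/gid, project id, size, timestamps, advise/flags,
 * the pinned-file GC failure count and the inline flags.
 */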
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	i_uid_write(inode, le32_to_cpu(raw->i_uid));
	i_gid_write(inode, le32_to_cpu(raw->i_gid));

	if (raw->i_inline & F2FS_EXTRA_ATTR) {
		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
			F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
								i_projid)) {
			projid_t i_projid;

			i_projid = (projid_t)le32_to_cpu(raw->i_projid);
			F2FS_I(inode)->i_projid =
				make_kprojid(&init_user_ns, i_projid);
		}
	}

	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;
	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(raw->i_gc_failures);

	recover_inline_flags(inode, raw);

	f2fs_mark_inode_dirty_sync(inode, true);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE,
		"recover_inode: ino = %x, name = %s, inline = %x",
			ino_of_node(page), name, raw->i_inline);
}

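/*
 * Recovery step #1: follow the warm node chain starting at the block next
 * to CURSEG_WARM_NODE and collect every inode that owns an fsynced dnode
 * into @head.  The walk ends at the first non-recoverable dnode and is
 * aborted with -EINVAL when the chain is detected to loop.
 */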
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	unsigned int loop_cnt = 0;
	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
						valid_user_blocks(sbi);
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = f2fs_recover_inode_page(sbi, page);
				if (err)
					break;
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* sanity check in order to detect looped node chain */
		if (++loop_cnt >= free_blocks ||
				blkaddr == next_blkaddr_of_node(page)) {
			f2fs_msg(sbi->sb, KERN_NOTICE,
				"%s: detect looped node chain, "
				"blkaddr:%u, next:%u",
				__func__, blkaddr, next_blkaddr_of_node(page));
			err = -EINVAL;
			break;
		}

		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		f2fs_ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}

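/* Release every entry (and its inode reference) left on @head. */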
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry);
}

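/*
 * Block @blkaddr is about to be reused for recovery, but according to the
 * segment bitmap a previous node may still reference it.  Locate that
 * owner through the segment summary (checking the in-memory cursegs
 * before the on-disk summary block) and truncate the stale index.
 */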
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
				block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = f2fs_get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = f2fs_start_bidx_of_node(offset, inode) +
				le16_to_cpu(sum.ofs_in_node);

	/*
	 * If the inode page is locked, unlock it temporarily, but keep its
	 * reference count alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.inode, tdn.node_page,
					tdn.ofs_in_node) == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

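/*
 * Replay one fsynced dnode @page onto @inode: first xattrs, then inline
 * data, then every data index the dnode covers, reserving or truncating
 * blocks so that the checkpointed state catches up with the log.
 */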
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		f2fs_recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = f2fs_recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	if (f2fs_recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	err = f2fs_get_node_info(sbi, dn.nid, &ni);
	if (err)
		goto err;

	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(dn.inode, page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			f2fs_reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = f2fs_reserve_new_block(&dn);
				while (err &&
				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
					err = f2fs_reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		inode->i_ino,
		file_keep_isize(inode) ? "keep" : "recover",
		recovered, err);
	return err;
}

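/*
 * Recovery step #2: walk the warm node chain again and, for each block
 * owned by a collected inode, replay the inode metadata, the dentry (on
 * its last dentry block) and the data indices.
 */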
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
						struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		f2fs_ra_meta_pages_cond(sbi, blkaddr);

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page))
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			del_fsync_inode(entry);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		f2fs_allocate_new_segments(sbi);
	return err;
}

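/*
 * Entry point of roll forward recovery.  With @check_only, only report
 * (by returning 1) whether fsynced data exists beyond the checkpoint;
 * otherwise run both recovery steps and write a CP_RECOVERY checkpoint
 * on success.  A read-only mount is made writable temporarily.
 */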
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & SB_RDONLY) {
		f2fs_msg(sbi->sb, KERN_INFO,
				"recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
skip:
	destroy_fsync_dnodes(&inode_list);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	mutex_unlock(&sbi->cp_mutex);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list);

	if (need_writecp) {
		set_sbi_flag(sbi, SBI_IS_RECOVERED);

		if (!err) {
			struct cp_control cpc = {
				.reason = CP_RECOVERY,
			};
			err = f2fs_write_checkpoint(sbi, &cpc);
		}
	}

	kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return ret ? ret : err;
}
Jaegeuk Kimd624c962012-11-02 17:13:32 +0900738}