// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"

#include <trace/events/f2fs.h>

#ifdef CONFIG_F2FS_FS_COMPRESSION
extern const struct address_space_operations f2fs_compress_aops;
#endif

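/*
 * Propagate dirtiness to the VFS. Skipped while the in-memory inode is
 * still being initialized (FI_NEW_INODE) or when f2fs_inode_dirtied()
 * reports that f2fs already tracks the inode as dirty.
 */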
void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;

	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}

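/* Translate the on-disk f2fs flags in i_flags into generic VFS i_flags bits. */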
void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & F2FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (file_is_encrypt(inode))
		new_fl |= S_ENCRYPTED;
	if (file_is_verity(inode))
		new_fl |= S_VERITY;
	if (flags & F2FS_CASEFOLD_FL)
		new_fl |= S_CASEFOLD;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
			S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}

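/*
 * For special inodes (chr/blk/fifo/socket), the device number lives in
 * the first spare i_addr slots after the extra attributes: a non-zero
 * slot holds the old encoding, otherwise the next slot holds the new one.
 */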
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[extra_size])
			inode->i_rdev = old_decode_dev(
					le32_to_cpu(ri->i_addr[extra_size]));
		else
			inode->i_rdev = new_decode_dev(
					le32_to_cpu(ri->i_addr[extra_size + 1]));
	}
}

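/*
 * Returns 0 if the inode's first data block address is valid and
 * written, 1 if it is not a written block address, and -EFSCORRUPTED
 * if the address fails the on-disk validity check.
 */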
static int __written_first_block(struct f2fs_sb_info *sbi,
					struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

	if (!__is_valid_data_blkaddr(addr))
		return 1;
	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE))
		return -EFSCORRUPTED;
	return 0;
}

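/*
 * Counterpart of __get_inode_rdev(): encode i_rdev into the spare
 * i_addr slots, using the old encoding whenever the device number fits.
 */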
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}

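/*
 * If any word in the inline data area is non-zero, the FI_DATA_EXIST
 * flag evidently did not reach disk; restore it in the raw inode and
 * dirty the node page so the fix is written back.
 */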
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
	return;
}

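/*
 * Inode checksums apply only when the superblock feature is on and the
 * page is an inode whose extra-attribute area is large enough to hold
 * the i_inode_checksum field.
 */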
static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_sb_has_inode_chksum(sbi))
		return false;

	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				i_inode_checksum))
		return false;

	return true;
}

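/*
 * The checksum covers the inode number, the generation, and the whole
 * inode block, with the stored i_inode_checksum field replaced by a
 * zero placeholder so the checksum does not cover itself.
 */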
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}

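/*
 * Returns true when the stored and computed checksums match or when the
 * check does not apply. Without CONFIG_F2FS_CHECK_FS, dirty and
 * in-writeback pages are skipped, since their checksum may not have
 * been recomputed yet.
 */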
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
		return true;

#ifdef CONFIG_F2FS_CHECK_FS
	if (!f2fs_enable_inode_chksum(sbi, page))
#else
	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
#endif
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
			  page->index, ino_of_node(page), provided, calculated);

	return provided == calculated;
}

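/* Recompute and store the checksum, if checksumming applies to this page. */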
void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

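/*
 * Validate the on-disk inode against enabled features and size limits.
 * Any inconsistency sets SBI_NEED_FSCK and makes do_read_inode() fail
 * with -EFSCORRUPTED.
 */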
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri = F2FS_INODE(node_page);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
			  __func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
			  __func__, inode->i_ino,
			  ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (f2fs_sb_has_flexible_inline_xattr(sbi)
			&& !f2fs_has_extra_attr(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
			  __func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
			!f2fs_sb_has_extra_attr(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_extra_isize,
			  F2FS_TOTAL_EXTRA_ATTR_SIZE);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
		f2fs_sb_has_flexible_inline_xattr(sbi) &&
		f2fs_has_inline_xattr(inode) &&
		(!fi->i_inline_xattr_size ||
		fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_inline_xattr_size,
			  MAX_INLINE_XATTR_SIZE);
		return false;
	}

	if (F2FS_I(inode)->extent_tree) {
		struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;

		if (ei->len &&
			(!f2fs_is_valid_blkaddr(sbi, ei->blk,
						DATA_GENERIC_ENHANCE) ||
			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
						DATA_GENERIC_ENHANCE))) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
				  __func__, inode->i_ino,
				  ei->blk, ei->fofs, ei->len);
			return false;
		}
	}

	if (f2fs_has_inline_data(inode) &&
			(!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
			fi->i_flags & F2FS_COMPR_FL &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
						i_log_cluster_size)) {
		if (ri->i_compress_algorithm >= COMPRESS_MAX) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
				"compress algorithm: %u, run fsck to fix",
				  __func__, inode->i_ino,
				  ri->i_compress_algorithm);
			return false;
		}
		if (le64_to_cpu(ri->i_compr_blocks) >
				SECTOR_TO_BLOCK(inode->i_blocks)) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has inconsistent "
				"i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
				  __func__, inode->i_ino,
				  le64_to_cpu(ri->i_compr_blocks),
				  SECTOR_TO_BLOCK(inode->i_blocks));
			return false;
		}
		if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
			ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
				"log cluster size: %u, run fsck to fix",
				  __func__, inode->i_ino,
				  ri->i_log_cluster_size);
			return false;
		}
	}

	return true;
}

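/* Read the on-disk inode into the in-memory inode and f2fs_inode_info. */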
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;
	int err;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	if (S_ISREG(inode->i_mode))
		fi->i_flags &= ~F2FS_PROJINHERIT_FL;
	bitmap_zero(fi->flags, FI_MAX);
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	f2fs_init_extent_tree(inode, node_page);

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {

		/*
		 * Previous inline data or directory always reserved 200 bytes
		 * in inode layout, even if inline_xattr is disabled. In order
		 * to keep inline_dentry's structure for backward compatibility,
		 * we get the space back only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		return -EFSCORRUPTED;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* try to recover cold bit for non-dir inode */
	if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
		set_cold_node(node_page, false);
		set_page_dirty(node_page);
	}

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	if (S_ISREG(inode->i_mode)) {
		err = __written_first_block(sbi, ri);
		if (err < 0) {
			f2fs_put_page(node_page, 1);
			return err;
		}
		if (!err)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
					(fi->i_flags & F2FS_COMPR_FL)) {
		if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
					i_log_cluster_size)) {
			atomic_set(&fi->i_compr_blocks,
					le64_to_cpu(ri->i_compr_blocks));
			fi->i_compress_algorithm = ri->i_compress_algorithm;
			fi->i_log_cluster_size = ri->i_log_cluster_size;
			fi->i_compress_flag = le16_to_cpu(ri->i_compress_flag);
			fi->i_cluster_size = 1 << fi->i_log_cluster_size;
			set_inode_flag(inode, FI_COMPRESSED_FILE);
		}
	}

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);
	stat_inc_compr_inode(inode);
	stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks));

	return 0;
}

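/*
 * Look up or instantiate an inode: cached inodes are returned as is,
 * new ones are read from disk and then given the inode/address-space
 * operations matching their type (node, meta, compress cache, regular,
 * directory, symlink, or special).
 */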
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (ino == F2FS_COMPRESS_INO(sbi))
		goto make_now;
#endif

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_COMPRESS_INO(sbi)) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		inode->i_mapping->a_ops = &f2fs_compress_aops;
		/*
		 * generic_error_remove_page only truncates pages of regular
		 * inode
		 */
		inode->i_mode |= S_IFREG;
#endif
		mapping_set_gfp_mask(inode->i_mapping,
			GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISLNK(inode->i_mode)) {
		if (file_is_encrypt(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);

	if (file_should_truncate(inode)) {
		ret = f2fs_truncate(inode);
		if (ret)
			goto bad_inode;
		file_dont_truncate(inode);
	}

	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	f2fs_inode_synced(inode);
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

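/* Like f2fs_iget(), but waits and retries on transient -ENOMEM. */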
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			memalloc_retry_wait(GFP_NOFS);
			goto retry;
		}
	}
	return inode;
}

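/*
 * Copy the in-memory inode into its (locked) node page and dirty the
 * page so that the update reaches disk with the next writeback.
 */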
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (et) {
		read_lock(&et->lock);
		set_raw_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}

		if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
							i_log_cluster_size)) {
			ri->i_compr_blocks =
				cpu_to_le64(atomic_read(
					&F2FS_I(inode)->i_compr_blocks));
			ri->i_compress_algorithm =
				F2FS_I(inode)->i_compress_algorithm;
			ri->i_compress_flag =
				cpu_to_le16(F2FS_I(inode)->i_compress_flag);
			ri->i_log_cluster_size =
				F2FS_I(inode)->i_log_cluster_size;
		}
	}

	__set_inode_rdev(inode, ri);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_page_private_inline(node_page);

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}

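/*
 * Fetch the inode's node page and update it, retrying on -ENOMEM.
 * Other failures besides -ENOENT stop checkpointing, since the inode
 * can no longer be persisted consistently.
 */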
void f2fs_update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
retry:
	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);

		if (err == -ENOMEM) {
			cond_resched();
			goto retry;
		} else if (err != -ENOENT) {
			f2fs_stop_checkpoint(sbi, false);
		}
		return;
	}
	f2fs_update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}

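/*
 * ->write_inode callback: skips the node/meta inodes and inodes that
 * are not f2fs-dirty and whose timestamps already match the on-disk
 * copy (the lazytime case); everything else is written back.
 */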
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	/*
	 * atime could be updated without dirtying f2fs inode in lazytime mode
	 */
	if (f2fs_is_time_consistent(inode) &&
		!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	if (!f2fs_is_checkpoint_ready(sbi))
		return -ENOSPC;

	/*
	 * We need to balance fs here to prevent producing dirty node pages
	 * during the urgent cleaning time when running out of free sections.
	 */
	f2fs_update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* some remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if ((inode->i_nlink || is_bad_inode(inode)) &&
		test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
		f2fs_invalidate_compress_pages(sbi, inode->i_ino);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_COMPRESS_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	err = f2fs_dquot_initialize(inode);
	if (err) {
		err = 0;
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
		f2fs_show_injection_info(sbi, FAULT_EVICT_INODE);
		err = -EIO;
	}

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT)
			err = 0;
	}

	/* give more chances, if ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err) {
		f2fs_update_inode_page(inode);
		if (dquot_initialize_needed(inode))
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}
	sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);
	stat_dec_compr_inode(inode);
	stat_sub_compr_blocks(inode,
			atomic_read(&F2FS_I(inode)->i_compr_blocks));

	if (likely(!f2fs_cp_error(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* if f2fs_new_inode() failed, .i_ino is zero, so skip it */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach this error
		 * condition: err & !f2fs_exist_written_data(sbi,
		 * inode->i_ino, ORPHAN_INO). In that case,
		 * f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/*
	 * clear nlink of inode in order to release resources of the inode
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * we must call this to avoid the inode remaining dirty, which would
	 * result in a panic when flushing dirty inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add inode to orphan list before f2fs_unlock_op()
	 * so we can prevent losing this orphan when encountering checkpoint
	 * and following sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "May lose orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}