// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
static struct bio_set f2fs_bioset;

#define F2FS_BIO_POOL_SIZE	NR_CURSEG_TYPE

int __init f2fs_init_bioset(void)
{
	if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
					0, BIOSET_NEED_BVECS))
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_bioset(void)
{
	bioset_exit(&f2fs_bioset);
}

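/*
 * Return true if writeback of @page must complete before a checkpoint can
 * finish: meta/node pages, directory data, atomic or quota file data, and
 * cold data pages. Such pages are accounted as F2FS_WB_CP_DATA (see
 * WB_DATA_TYPE()), and f2fs_write_end_io() wakes sbi->cp_wait once the
 * last of them completes.
 */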
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	if (f2fs_is_compressed_page(page))
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode) ||
			(S_ISREG(inode->i_mode) &&
			(f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
			is_cold_data(page))
		return true;
	return false;
}

static enum count_type __read_io_type(struct page *page)
{
	struct address_space *mapping = page_file_mapping(page);

	if (mapping) {
		struct inode *inode = mapping->host;
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		if (inode->i_ino == F2FS_META_INO(sbi))
			return F2FS_RD_META;

		if (inode->i_ino == F2FS_NODE_INO(sbi))
			return F2FS_RD_NODE;
	}
	return F2FS_RD_DATA;
}

/* postprocessing steps for read bios */
enum bio_post_read_step {
#ifdef CONFIG_FS_ENCRYPTION
	STEP_DECRYPT	= 1 << 0,
#else
	STEP_DECRYPT	= 0,	/* compile out the decryption-related code */
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
	STEP_DECOMPRESS	= 1 << 1,
#else
	STEP_DECOMPRESS	= 0,	/* compile out the decompression-related code */
#endif
#ifdef CONFIG_FS_VERITY
	STEP_VERITY	= 1 << 2,
#else
	STEP_VERITY	= 0,	/* compile out the verity-related code */
#endif
};

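/*
 * Per-bio private context for the read path. enabled_steps is a mask of
 * the STEP_* flags above; for example, a read from an encrypted fs-verity
 * file would typically carry STEP_DECRYPT | STEP_VERITY, while a plain
 * uncompressed file needs no context at all and leaves bio->bi_private
 * NULL.
 */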
struct bio_post_read_ctx {
	struct bio *bio;
	struct f2fs_sb_info *sbi;
	struct work_struct work;
	unsigned int enabled_steps;
};

static void f2fs_finish_read_bio(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	/*
	 * Update and unlock the bio's pagecache pages, and put the
	 * decompression context for any compressed pages.
	 */
	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;

		if (f2fs_is_compressed_page(page)) {
			if (bio->bi_status)
				f2fs_end_read_compressed_page(page, true);
			f2fs_put_page_dic(page);
			continue;
		}

		/* PG_error was set if decryption or verity failed. */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will be re-read later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}

	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void f2fs_verify_bio(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;
	bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);

	/*
	 * fsverity_verify_bio() may call readpages() again, and while verity
	 * will be disabled for this, decryption and/or decompression may still
	 * be needed, resulting in another bio_post_read_ctx being allocated.
	 * So to prevent deadlocks we need to release the current ctx to the
	 * mempool first. This assumes that verity is the last post-read step.
	 */
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	/*
	 * Verify the bio's pages with fs-verity. Exclude compressed pages,
	 * as those were handled separately by f2fs_end_read_compressed_page().
	 */
	if (may_have_compressed_pages) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, bio, iter_all) {
			struct page *page = bv->bv_page;

			if (!f2fs_is_compressed_page(page) &&
			    !PageError(page) && !fsverity_verify_page(page))
				SetPageError(page);
		}
	} else {
		fsverity_verify_bio(bio);
	}

	f2fs_finish_read_bio(bio);
}

/*
 * If the bio's data needs to be verified with fs-verity, then enqueue the
 * verity work for the bio. Otherwise finish the bio now.
 *
 * Note that to avoid deadlocks, the verity work can't be done on the
 * decryption/decompression workqueue. This is because verifying the data pages
 * can involve reading verity metadata pages from the file, and these verity
 * metadata pages may be encrypted and/or compressed.
 */
static void f2fs_verify_and_finish_bio(struct bio *bio)
{
	struct bio_post_read_ctx *ctx = bio->bi_private;

	if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
		INIT_WORK(&ctx->work, f2fs_verify_bio);
		fsverity_enqueue_verify_work(&ctx->work);
	} else {
		f2fs_finish_read_bio(bio);
	}
}

/*
 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
 * remaining page was read by @ctx->bio.
 *
 * Note that a bio may span clusters (even a mix of compressed and uncompressed
 * clusters) or be for just part of a cluster. STEP_DECOMPRESS just indicates
 * that the bio includes at least one compressed page. The actual decompression
 * is done on a per-cluster basis, not a per-bio basis.
 */
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
	bool all_compressed = true;

	bio_for_each_segment_all(bv, ctx->bio, iter_all) {
		struct page *page = bv->bv_page;

		/* PG_error was set if decryption failed. */
		if (f2fs_is_compressed_page(page))
			f2fs_end_read_compressed_page(page, PageError(page));
		else
			all_compressed = false;
	}

	/*
	 * Optimization: if all the bio's pages are compressed, then scheduling
	 * the per-bio verity work is unnecessary, as verity will be fully
	 * handled at the compression cluster level.
	 */
	if (all_compressed)
		ctx->enabled_steps &= ~STEP_VERITY;
}

static void f2fs_post_read_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	if (ctx->enabled_steps & STEP_DECRYPT)
		fscrypt_decrypt_bio(ctx->bio);

	if (ctx->enabled_steps & STEP_DECOMPRESS)
		f2fs_handle_step_decompress(ctx);

	f2fs_verify_and_finish_bio(ctx->bio);
}

static void f2fs_read_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
	struct bio_post_read_ctx *ctx = bio->bi_private;

	if (time_to_inject(sbi, FAULT_READ_IO)) {
		f2fs_show_injection_info(sbi, FAULT_READ_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	if (bio->bi_status) {
		f2fs_finish_read_bio(bio);
		return;
	}

	if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
		INIT_WORK(&ctx->work, f2fs_post_read_work);
		queue_work(ctx->sbi->post_read_wq, &ctx->work);
	} else {
		f2fs_verify_and_finish_bio(bio);
	}
}

static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
		f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (IS_DUMMY_WRITTEN_PAGE(page)) {
			set_page_private(page, (unsigned long)NULL);
			ClearPagePrivate(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_finalize_bounce_page(&page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_is_compressed_page(page)) {
			f2fs_compress_write_end_io(bio, page);
			continue;
		}
#endif

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		if (f2fs_in_warm_node_list(sbi, page))
			f2fs_del_fsync_node_entry(sbi, page);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

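/*
 * Map a filesystem block address to the backing device that covers it.
 * A worked example with hypothetical numbers: on a two-device setup where
 * FDEV(1).start_blk == 0x8000, blk_addr 0x8010 selects FDEV(1).bdev and is
 * rebased to 0x10 before SECTOR_FROM_BLOCK() converts it for the bio.
 */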
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	if (f2fs_is_multi_device(sbi)) {
		for (i = 0; i < sbi->s_ndevs; i++) {
			if (FDEV(i).start_blk <= blk_addr &&
			    FDEV(i).end_blk >= blk_addr) {
				blk_addr -= FDEV(i).start_blk;
				bdev = FDEV(i).bdev;
				break;
			}
		}
	}
	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, npages, &f2fs_bioset);

	f2fs_target_device(sbi, fio->new_blkaddr, bio);
	if (is_read_io(fio->op)) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
						fio->type, fio->temp);
	}
	if (fio->io_wbc)
		wbc_init_bio(fio->io_wbc, bio);

	return bio;
}

static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
				  pgoff_t first_idx,
				  const struct f2fs_io_info *fio,
				  gfp_t gfp_mask)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (!fio || !fio->encrypted_page)
		fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
}

static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
				     pgoff_t next_idx,
				     const struct f2fs_io_info *fio)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (fio && fio->encrypted_page)
		return !bio_has_crypt_ctx(bio);

	return fscrypt_mergeable_bio(bio, inode, next_idx);
}

static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (type != DATA && type != NODE)
			goto submit_io;

		if (f2fs_lfs_mode(sbi) && current->plug)
			blk_finish_plug(current->plug);

		if (!F2FS_IO_ALIGNED(sbi))
			goto submit_io;

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					      GFP_NOIO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			zero_user_segment(page, 0, PAGE_SIZE);
			SetPagePrivate(page);
			set_page_private(page, DUMMY_WRITTEN_PAGE);
			lock_page(page);
			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose the next block address chain.
		 * So, we need to do a checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	submit_bio(bio);
}

void f2fs_submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	__submit_bio(sbi, bio, type);
}

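/*
 * Fold the per-temperature REQ_META/REQ_FUA policy from
 * sbi->{data,node}_io_flag into fio->op_flags. A hypothetical example:
 * data_io_flag == 0x09 (binary 001001) makes HOT data writes carry both
 * REQ_META and REQ_FUA, per the bit layout documented in the function body.
 */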
static void __attach_io_flag(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
	unsigned int io_flag, fua_flag, meta_flag;

	if (fio->type == DATA)
		io_flag = sbi->data_io_flag;
	else if (fio->type == NODE)
		io_flag = sbi->node_io_flag;
	else
		return;

	fua_flag = io_flag & temp_mask;
	meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;

	/*
	 * data/node io flag bits per temp:
	 *      REQ_META     |      REQ_FUA      |
	 *    5 |    4 |   3 |    2 |    1 |   0 |
	 * Cold | Warm | Hot | Cold | Warm | Hot |
	 */
	if ((1 << fio->temp) & meta_flag)
		fio->op_flags |= REQ_META;
	if ((1 << fio->temp) & fua_flag)
		fio->op_flags |= REQ_FUA;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	__attach_io_flag(fio);
	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

static bool __has_merged_page(struct bio *bio, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (!bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *target = bvec->bv_page;

		if (fscrypt_is_bounce_page(target)) {
			target = fscrypt_pagecache_page(target);
			if (IS_ERR(target))
				continue;
		}
		if (f2fs_is_compressed_page(target)) {
			target = f2fs_compress_control_page(target);
			if (IS_ERR(target))
				continue;
		}

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

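/*
 * Flush the bio currently being merged for the given page type and
 * temperature. In the checkpoint path (type >= META_FLUSH) the pending bio
 * is retagged as a REQ_PREFLUSH/REQ_FUA write unless the filesystem was
 * mounted with "nobarrier".
 */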
static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, bool force)
{
	enum temp_type temp;
	bool ret = true;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		if (!force) {
			enum page_type btype = PAGE_TYPE_OF_BIO(type);
			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

			down_read(&io->io_rwsem);
			ret = __has_merged_page(io->bio, inode, page, ino);
			up_read(&io->io_rwsem);
		}
		if (ret)
			__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			fio->is_por ? META_POR : (__is_meta_io(fio) ?
			META_GENERIC : DATA_GENERIC_ENHANCE)))
		return -EFSCORRUPTED;

	trace_f2fs_submit_page_bio(page, fio);

	/* Allocate a new bio */
	bio = __bio_alloc(fio, 1);

	f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
			       fio->page->index, fio, GFP_NOIO);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	if (fio->io_wbc && !is_read_io(fio->op))
		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

	__attach_io_flag(fio);
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	inc_page_count(fio->sbi, is_read_io(fio->op) ?
			__read_io_type(page) : WB_DATA_TYPE(fio->page));

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}

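/*
 * A page can only be merged into an in-flight bio when the new block is
 * physically contiguous with the bio's last block, resolves to the same
 * block device, and the bio has not already reached the optional
 * max_io_bytes limit.
 */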
static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
				block_t last_blkaddr, block_t cur_blkaddr)
{
	if (unlikely(sbi->max_io_bytes &&
			bio->bi_iter.bi_size >= sbi->max_io_bytes))
		return false;
	if (last_blkaddr + 1 != cur_blkaddr)
		return false;
	return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
}

static bool io_type_is_mergeable(struct f2fs_bio_info *io,
						struct f2fs_io_info *fio)
{
	if (io->fio.op != fio->op)
		return false;
	return io->fio.op_flags == fio->op_flags;
}

static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
					struct f2fs_bio_info *io,
					struct f2fs_io_info *fio,
					block_t last_blkaddr,
					block_t cur_blkaddr)
{
	if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
		unsigned int filled_blocks =
				F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
		unsigned int io_size = F2FS_IO_SIZE(sbi);
		unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;

		/*
		 * The IOs in the bio are aligned, but there is not enough
		 * vector space left for another aligned chunk.
		 */
		if (!(filled_blocks % io_size) && left_vecs < io_size)
			return false;
	}
	if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
		return false;
	return io_type_is_mergeable(io, fio);
}

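/*
 * In-place-update (IPU) data writes are tracked on per-temperature
 * io->bio_list lists so that a later writer can either append a page to a
 * pending IPU bio (add_ipu_page()) or force one out
 * (f2fs_submit_merged_ipu_write()).
 */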
static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
				struct page *page, enum temp_type temp)
{
	struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
	struct bio_entry *be;

	be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
	be->bio = bio;
	bio_get(bio);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
		f2fs_bug_on(sbi, 1);

	down_write(&io->bio_list_lock);
	list_add_tail(&be->list, &io->bio_list);
	up_write(&io->bio_list_lock);
}

static void del_bio_entry(struct bio_entry *be)
{
	list_del(&be->list);
	kmem_cache_free(bio_entry_slab, be);
}

static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
							struct page *page)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum temp_type temp;
	bool found = false;
	int ret = -EAGAIN;

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (be->bio != *bio)
				continue;

			found = true;

			f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
							    *fio->last_block,
							    fio->new_blkaddr));
			if (f2fs_crypt_mergeable_bio(*bio,
					fio->page->mapping->host,
					fio->page->index, fio) &&
			    bio_add_page(*bio, page, PAGE_SIZE, 0) ==
					PAGE_SIZE) {
				ret = 0;
				break;
			}

			/* page can't be merged into bio; submit the bio */
			del_bio_entry(be);
			__submit_bio(sbi, *bio, DATA);
			break;
		}
		up_write(&io->bio_list_lock);
	}

	if (ret) {
		bio_put(*bio);
		*bio = NULL;
	}

	return ret;
}

void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
					struct bio **bio, struct page *page)
{
	enum temp_type temp;
	bool found = false;
	struct bio *target = bio ? *bio : NULL;

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		if (list_empty(head))
			continue;

		down_read(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								page, 0);
			if (found)
				break;
		}
		up_read(&io->bio_list_lock);

		if (!found)
			continue;

		found = false;

		down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								page, 0);
			if (found) {
				target = be->bio;
				del_bio_entry(be);
				break;
			}
		}
		up_write(&io->bio_list_lock);
	}

	if (found)
		__submit_bio(sbi, target, DATA);
	if (bio && *bio) {
		bio_put(*bio);
		*bio = NULL;
	}
}

int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio = *fio->bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
		return -EFSCORRUPTED;

	trace_f2fs_submit_page_bio(page, fio);

	if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
						fio->new_blkaddr))
		f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
alloc_new:
	if (!bio) {
		bio = __bio_alloc(fio, BIO_MAX_VECS);
		__attach_io_flag(fio);
		f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
				       fio->page->index, fio, GFP_NOIO);
		bio_set_op_attrs(bio, fio->op, fio->op_flags);

		add_bio_entry(fio->sbi, bio, page, fio->temp);
	} else {
		if (add_ipu_page(fio, &bio, page))
			goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

	inc_page_count(fio->sbi, WB_DATA_TYPE(page));

	*fio->last_block = fio->new_blkaddr;
	*fio->bio = bio;

	return 0;
}

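/*
 * Main entry point for merged writes: append the page to the per-{type,
 * temperature} in-flight bio when block contiguity, op flags and the
 * crypto context all allow it; otherwise flush the old bio and open a new
 * one. With fio->in_list set, keep draining fios queued on io->io_list.
 */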
void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	verify_fio_blkaddr(fio);

	if (fio->encrypted_page)
		bio_page = fio->encrypted_page;
	else if (fio->compressed_page)
		bio_page = fio->compressed_page;
	else
		bio_page = fio->page;

	/* set submitted = true as a return value */
	fio->submitted = true;

	inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	if (io->bio &&
	    (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
			      fio->new_blkaddr) ||
	     !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
				       bio_page->index, fio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		if (F2FS_IO_ALIGNED(sbi) &&
				(fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			fio->retry = true;
			goto skip;
		}
		io->bio = __bio_alloc(fio, BIO_MAX_VECS);
		f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
				       bio_page->index, fio, GFP_NOIO);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);

	io->last_block_in_bio = fio->new_blkaddr;

	trace_f2fs_submit_page_write(fio->page, fio);
skip:
	if (fio->in_list)
		goto next;
out:
	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
				!f2fs_is_checkpoint_ready(sbi))
		__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

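/*
 * Allocate a read bio for @nr_pages pages at @blkaddr and, when post-read
 * work (decryption, decompression, verity) will be needed, attach a
 * bio_post_read_ctx describing the steps; see the STEP_DECOMPRESS note in
 * the body for why compressed files always get a ctx.
 */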
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
				      unsigned nr_pages, unsigned op_flag,
				      pgoff_t first_idx, bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;
	struct bio_post_read_ctx *ctx;
	unsigned int post_read_steps = 0;

	bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL,
			       bio_max_segs(nr_pages), &f2fs_bioset);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);

	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= STEP_DECRYPT;

	if (f2fs_need_verity(inode, first_idx))
		post_read_steps |= STEP_VERITY;

	/*
	 * STEP_DECOMPRESS is handled specially, since a compressed file might
	 * contain both compressed and uncompressed clusters. We'll allocate a
	 * bio_post_read_ctx if the file is compressed, but the caller is
	 * responsible for enabling STEP_DECOMPRESS if it's actually needed.
	 */

	if (post_read_steps || f2fs_compressed_file(inode)) {
		/* Due to the mempool, this never fails. */
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		ctx->bio = bio;
		ctx->sbi = sbi;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}

	return bio;
}

/* This can handle encryption stuff */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
				 block_t blkaddr, int op_flags, bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;

	bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
					page->index, for_write);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, blkaddr);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	ClearPageError(page);
	inc_page_count(sbi, F2FS_RD_DATA);
	f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
	__submit_bio(sbi, bio, DATA);
	return 0;
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	f2fs_set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

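/*
 * Reserve @count new blocks in the dnode. A hypothetical example: with
 * count == 3 and dn->ofs_in_node == 5, the loop below walks node slots
 * 5, 6, 7, ... converting each NULL_ADDR slot to NEW_ADDR and skipping
 * slots that already hold a block, until three slots have been reserved.
 */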
/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err;

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr = f2fs_data_blkaddr(dn);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int f2fs_reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = f2fs_reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = f2fs_reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei = {0, 0, 0};
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

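/*
 * Read one data page for @index, trying the extent cache first and falling
 * back to a dnode lookup. A NEW_ADDR block (reserved but never written) is
 * satisfied as a zeroed, uptodate page without issuing any I/O.
 */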
Chao Yu4d57b862018-05-30 00:20:41 +08001141struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
Mike Christie04d328d2016-06-05 14:31:55 -05001142 int op_flags, bool for_write)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001143{
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001144 struct address_space *mapping = inode->i_mapping;
1145 struct dnode_of_data dn;
1146 struct page *page;
Hou Pengyange15882b2017-02-23 09:18:05 +00001147 struct extent_info ei = {0,0,0};
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001148 int err;
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001149
Jaegeuk Kima56c7c62015-10-09 15:11:38 -07001150 page = f2fs_grab_cache_page(mapping, index, for_write);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001151 if (!page)
1152 return ERR_PTR(-ENOMEM);
1153
Chao Yucb3bc9e2015-02-05 18:03:40 +08001154 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
1155 dn.data_blkaddr = ei.blk + index - ei.fofs;
Chao Yu93770ab2019-04-15 15:26:32 +08001156 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
1157 DATA_GENERIC_ENHANCE_READ)) {
Chao Yu10f966b2019-06-20 11:36:14 +08001158 err = -EFSCORRUPTED;
Chao Yu93770ab2019-04-15 15:26:32 +08001159 goto put_err;
1160 }
Chao Yucb3bc9e2015-02-05 18:03:40 +08001161 goto got_it;
1162 }
1163
Jaegeuk Kim650495d2013-05-13 08:38:35 +09001164 set_new_dnode(&dn, inode, NULL, NULL, 0);
Chao Yu4d57b862018-05-30 00:20:41 +08001165 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
Jaegeuk Kim86531d62015-07-15 13:08:21 -07001166 if (err)
1167 goto put_err;
Jaegeuk Kim650495d2013-05-13 08:38:35 +09001168 f2fs_put_dnode(&dn);
1169
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09001170 if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
Jaegeuk Kim86531d62015-07-15 13:08:21 -07001171 err = -ENOENT;
1172 goto put_err;
Jaegeuk Kim650495d2013-05-13 08:38:35 +09001173 }
Chao Yu93770ab2019-04-15 15:26:32 +08001174 if (dn.data_blkaddr != NEW_ADDR &&
1175 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
1176 dn.data_blkaddr,
1177 DATA_GENERIC_ENHANCE)) {
Chao Yu10f966b2019-06-20 11:36:14 +08001178 err = -EFSCORRUPTED;
Chao Yu93770ab2019-04-15 15:26:32 +08001179 goto put_err;
1180 }
Chao Yucb3bc9e2015-02-05 18:03:40 +08001181got_it:
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001182 if (PageUptodate(page)) {
1183 unlock_page(page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001184 return page;
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001185 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001186
Jaegeuk Kimd59ff4d2013-08-20 19:13:07 +09001187 /*
1188 * A new dentry page is allocated but not able to be written, since its
1189 * new inode page couldn't be allocated due to -ENOSPC.
1190 * In such the case, its blkaddr can be remained as NEW_ADDR.
Chao Yu4d57b862018-05-30 00:20:41 +08001191 * see, f2fs_add_link -> f2fs_get_new_data_page ->
1192 * f2fs_init_inode_metadata.
Jaegeuk Kimd59ff4d2013-08-20 19:13:07 +09001193 */
1194 if (dn.data_blkaddr == NEW_ADDR) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001195 zero_user_segment(page, 0, PAGE_SIZE);
Jaegeuk Kim237c0792016-06-30 18:49:15 -07001196 if (!PageUptodate(page))
1197 SetPageUptodate(page);
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001198 unlock_page(page);
Jaegeuk Kimd59ff4d2013-08-20 19:13:07 +09001199 return page;
1200 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001201
Jia Yangb7973092020-07-01 10:27:40 +08001202 err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
1203 op_flags, for_write);
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001204 if (err)
Jaegeuk Kim86531d62015-07-15 13:08:21 -07001205 goto put_err;
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001206 return page;
Jaegeuk Kim86531d62015-07-15 13:08:21 -07001207
1208put_err:
1209 f2fs_put_page(page, 1);
1210 return ERR_PTR(err);
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001211}
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001212
Chao Yu4d57b862018-05-30 00:20:41 +08001213struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001214{
1215 struct address_space *mapping = inode->i_mapping;
1216 struct page *page;
1217
1218 page = find_get_page(mapping, index);
1219 if (page && PageUptodate(page))
1220 return page;
1221 f2fs_put_page(page, 0);
1222
Chao Yu4d57b862018-05-30 00:20:41 +08001223 page = f2fs_get_read_data_page(inode, index, 0, false);
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001224 if (IS_ERR(page))
1225 return page;
1226
1227 if (PageUptodate(page))
1228 return page;
1229
1230 wait_on_page_locked(page);
1231 if (unlikely(!PageUptodate(page))) {
1232 f2fs_put_page(page, 0);
1233 return ERR_PTR(-EIO);
1234 }
1235 return page;
1236}
1237
1238/*
1239 * If it tries to access a hole, return an error.
1240 * Because, the callers, functions in dir.c and GC, should be able to know
1241 * whether this page exists or not.
1242 */
Chao Yu4d57b862018-05-30 00:20:41 +08001243struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
Jaegeuk Kima56c7c62015-10-09 15:11:38 -07001244 bool for_write)
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001245{
1246 struct address_space *mapping = inode->i_mapping;
1247 struct page *page;
1248repeat:
Chao Yu4d57b862018-05-30 00:20:41 +08001249 page = f2fs_get_read_data_page(inode, index, 0, for_write);
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001250 if (IS_ERR(page))
1251 return page;
1252
1253 /* wait for read completion */
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001254 lock_page(page);
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09001255 if (unlikely(page->mapping != mapping)) {
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09001256 f2fs_put_page(page, 1);
1257 goto repeat;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001258 }
Chao Yu1563ac72016-07-03 22:05:12 +08001259 if (unlikely(!PageUptodate(page))) {
1260 f2fs_put_page(page, 1);
1261 return ERR_PTR(-EIO);
1262 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001263 return page;
1264}
1265
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001266/*
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001267 * Caller ensures that this data page is never allocated.
1268 * A new zero-filled data page is allocated in the page cache.
Jaegeuk Kim39936832012-11-22 16:21:29 +09001269 *
Chao Yu4f4124d2013-12-21 18:02:14 +08001270 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
1271 * f2fs_unlock_op().
Chao Yu470f00e2015-07-14 18:14:06 +08001272 * Note that, ipage is set only by make_empty_dir, and if any error occur,
1273 * ipage should be released by this function.
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001274 */
Chao Yu4d57b862018-05-30 00:20:41 +08001275struct page *f2fs_get_new_data_page(struct inode *inode,
Jaegeuk Kima8865372013-12-27 17:04:17 +09001276 struct page *ipage, pgoff_t index, bool new_i_size)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001277{
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001278 struct address_space *mapping = inode->i_mapping;
1279 struct page *page;
1280 struct dnode_of_data dn;
1281 int err;
Jaegeuk Kim76121182016-01-01 22:03:47 -08001282
Jaegeuk Kima56c7c62015-10-09 15:11:38 -07001283 page = f2fs_grab_cache_page(mapping, index, true);
Chao Yu470f00e2015-07-14 18:14:06 +08001284 if (!page) {
1285 /*
1286 * before exiting, we should make sure ipage will be released
1287 * if any error occurs.
1288 */
1289 f2fs_put_page(ipage, 1);
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001290 return ERR_PTR(-ENOMEM);
Chao Yu470f00e2015-07-14 18:14:06 +08001291 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001292
Jaegeuk Kima8865372013-12-27 17:04:17 +09001293 set_new_dnode(&dn, inode, ipage, NULL, 0);
Huajun Lib6009652013-11-10 23:13:18 +08001294 err = f2fs_reserve_block(&dn, index);
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001295 if (err) {
1296 f2fs_put_page(page, 1);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001297 return ERR_PTR(err);
Jaegeuk Kima8865372013-12-27 17:04:17 +09001298 }
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001299 if (!ipage)
1300 f2fs_put_dnode(&dn);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001301
1302 if (PageUptodate(page))
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001303 goto got_it;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001304
1305 if (dn.data_blkaddr == NEW_ADDR) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001306 zero_user_segment(page, 0, PAGE_SIZE);
Jaegeuk Kim237c0792016-06-30 18:49:15 -07001307 if (!PageUptodate(page))
1308 SetPageUptodate(page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001309 } else {
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001310 f2fs_put_page(page, 1);
Jaegeuk Kima8865372013-12-27 17:04:17 +09001311
Jaegeuk Kim76121182016-01-01 22:03:47 -08001312 /* if ipage exists, blkaddr should be NEW_ADDR */
1313 f2fs_bug_on(F2FS_I_SB(inode), ipage);
Chao Yu4d57b862018-05-30 00:20:41 +08001314 page = f2fs_get_lock_data_page(inode, index, true);
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001315 if (IS_ERR(page))
Jaegeuk Kim76121182016-01-01 22:03:47 -08001316 return page;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001317 }
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001318got_it:
Chao Yu9edcdab2015-09-11 14:43:52 +08001319 if (new_i_size && i_size_read(inode) <
Jaegeuk Kimee6d1822016-05-20 16:32:49 -07001320 ((loff_t)(index + 1) << PAGE_SHIFT))
Jaegeuk Kimfc9581c2016-05-20 09:22:03 -07001321 f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001322 return page;
1323}
1324
Hyunchul Leed5097be2017-11-28 09:23:00 +09001325static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001326{
Jaegeuk Kim40813632014-09-02 15:31:18 -07001327 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001328 struct f2fs_summary sum;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001329 struct node_info ni;
Chao Yu6aa58d82018-08-14 22:37:25 +08001330 block_t old_blkaddr;
Chao Yu46008c62016-05-09 19:56:30 +08001331 blkcnt_t count = 1;
Chao Yu0abd6752017-07-09 00:13:07 +08001332 int err;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001333
Jaegeuk Kim91942322016-05-20 10:13:22 -07001334 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001335 return -EPERM;
Chao Yudf6136e2015-03-23 10:33:37 +08001336
Chao Yu77357302018-07-17 00:02:17 +08001337 err = f2fs_get_node_info(sbi, dn->nid, &ni);
1338 if (err)
1339 return err;
1340
Chao Yua2ced1c2020-02-14 17:44:10 +08001341 dn->data_blkaddr = f2fs_data_blkaddr(dn);
Chao Yuf847c692018-09-27 18:34:52 +08001342 if (dn->data_blkaddr != NULL_ADDR)
Chao Yudf6136e2015-03-23 10:33:37 +08001343 goto alloc;
1344
Chao Yu0abd6752017-07-09 00:13:07 +08001345 if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
1346 return err;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001347
Chao Yudf6136e2015-03-23 10:33:37 +08001348alloc:
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001349 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
Chao Yu6aa58d82018-08-14 22:37:25 +08001350 old_blkaddr = dn->data_blkaddr;
1351 f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
Chao Yu093749e2020-08-04 21:14:49 +08001352 &sum, seg_type, NULL);
Chao Yu6aa58d82018-08-14 22:37:25 +08001353 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
1354 invalidate_mapping_pages(META_MAPPING(sbi),
1355 old_blkaddr, old_blkaddr);
Chao Yu86f35dc2019-08-28 17:33:35 +08001356 f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001357
Jaegeuk Kim0a4daae2018-09-19 15:28:40 -07001358 /*
1359 * i_size will be updated by direct_IO. Otherwise, we'll get stale
1360 * data from an unwritten block via dio_read.
1361 */
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001362 return 0;
1363}
1364
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001365int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
Jaegeuk Kim59b802e2015-02-09 12:09:53 -08001366{
Jaegeuk Kimb439b102016-02-03 13:09:09 -08001367 struct inode *inode = file_inode(iocb->ki_filp);
Chao Yu5b8db7f2016-01-26 15:38:29 +08001368 struct f2fs_map_blocks map;
Chao Yud6d478a12018-01-03 17:30:19 +08001369 int flag;
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001370 int err = 0;
Chao Yud6d478a12018-01-03 17:30:19 +08001371 bool direct_io = iocb->ki_flags & IOCB_DIRECT;
Jaegeuk Kim59b802e2015-02-09 12:09:53 -08001372
Jaegeuk Kim0080c502016-05-07 08:52:57 -07001373 map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
Chao Yudfd02e42016-08-20 15:12:01 +08001374 map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
1375 if (map.m_len > map.m_lblk)
1376 map.m_len -= map.m_lblk;
1377 else
1378 map.m_len = 0;
1379
Chao Yuda859852016-01-26 15:42:58 +08001380 map.m_next_pgofs = NULL;
Chao Yuc4020b22018-01-11 14:42:30 +08001381 map.m_next_extent = NULL;
Hyunchul Leed5097be2017-11-28 09:23:00 +09001382 map.m_seg_type = NO_CHECK_TYPE;
Chao Yuf9d6d052018-11-13 14:33:45 +08001383 map.m_may_create = true;
Jaegeuk Kim59b802e2015-02-09 12:09:53 -08001384
Chao Yud6d478a12018-01-03 17:30:19 +08001385 if (direct_io) {
Chao Yu4d57b862018-05-30 00:20:41 +08001386 map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
Chao Yuf847c692018-09-27 18:34:52 +08001387 flag = f2fs_force_buffered_io(inode, iocb, from) ?
Chao Yud6d478a12018-01-03 17:30:19 +08001388 F2FS_GET_BLOCK_PRE_AIO :
1389 F2FS_GET_BLOCK_PRE_DIO;
1390 goto map_blocks;
Hyunchul Leed5097be2017-11-28 09:23:00 +09001391 }
Chao Yuf2470372017-07-19 00:19:05 +08001392 if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001393 err = f2fs_convert_inline_inode(inode);
1394 if (err)
1395 return err;
Jaegeuk Kim24b84912016-02-03 13:49:44 -08001396 }
Chao Yud6d478a12018-01-03 17:30:19 +08001397 if (f2fs_has_inline_data(inode))
Sheng Yong250066452017-11-22 18:23:39 +08001398 return err;
Chao Yud6d478a12018-01-03 17:30:19 +08001399
1400 flag = F2FS_GET_BLOCK_PRE_AIO;
1401
1402map_blocks:
1403 err = f2fs_map_blocks(inode, &map, 1, flag);
1404 if (map.m_len > 0 && err == -ENOSPC) {
1405 if (!direct_io)
1406 set_inode_flag(inode, FI_NO_PREALLOC);
1407 err = 0;
Sheng Yong250066452017-11-22 18:23:39 +08001408 }
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001409 return err;
Jaegeuk Kim59b802e2015-02-09 12:09:53 -08001410}
1411
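/*
 * Decision summary for f2fs_preallocate_blocks() above (an illustrative
 * recap of the code, not new behavior):
 *
 *	direct IO, not forced to buffered IO -> F2FS_GET_BLOCK_PRE_DIO
 *	direct IO, forced to buffered IO     -> F2FS_GET_BLOCK_PRE_AIO
 *	buffered, beyond the inline limit    -> convert inline, then PRE_AIO
 *	buffered, still inline data          -> nothing to preallocate
 *
 * On -ENOSPC the error is swallowed (and FI_NO_PREALLOC is set in the
 * buffered case) so that the write path falls back to per-page allocation.
 */
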
Chao Yu0ef81832020-06-18 14:36:22 +08001412void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
Yunlei He59c90812017-03-13 20:22:18 +08001413{
1414 if (flag == F2FS_GET_BLOCK_PRE_AIO) {
1415 if (lock)
1416 down_read(&sbi->node_change);
1417 else
1418 up_read(&sbi->node_change);
1419 } else {
1420 if (lock)
1421 f2fs_lock_op(sbi);
1422 else
1423 f2fs_unlock_op(sbi);
1424 }
1425}
1426
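/*
 * Usage sketch for f2fs_do_map_lock() (illustrative; it mirrors the
 * callers in f2fs_map_blocks() below): the lock must be dropped with the
 * same flag it was taken with, so that the node_change rwsem and
 * f2fs_lock_op()/f2fs_unlock_op() stay balanced:
 *
 *	f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
 *	... reserve or allocate blocks ...
 *	f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
 */
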
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001427/*
Chao Yu7a88ddb2020-02-27 19:30:05 +08001428 * f2fs_map_blocks() tries to find or build a mapping relationship which
1429 * maps contiguous logical blocks to physical blocks, and returns such
1430 * info via the f2fs_map_blocks structure.
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001431 */
Chao Yud323d002015-10-27 09:53:45 +08001432int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
Chao Yue2b4e2b2015-08-19 19:11:19 +08001433 int create, int flag)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001434{
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001435 unsigned int maxblocks = map->m_len;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001436 struct dnode_of_data dn;
Chao Yuf9811702015-09-21 20:17:52 +08001437 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Chao Yuf9d6d052018-11-13 14:33:45 +08001438 int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
Chao Yu46008c62016-05-09 19:56:30 +08001439 pgoff_t pgofs, end_offset, end;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001440 int err = 0, ofs = 1;
Chao Yu46008c62016-05-09 19:56:30 +08001441 unsigned int ofs_in_node, last_ofs_in_node;
1442 blkcnt_t prealloc;
Hou Pengyange15882b2017-02-23 09:18:05 +00001443 struct extent_info ei = {0,0,0};
Fan Li7df3a432015-12-17 13:20:59 +08001444 block_t blkaddr;
Chao Yuc4020b22018-01-11 14:42:30 +08001445 unsigned int start_pgofs;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001446
Chao Yudfd02e42016-08-20 15:12:01 +08001447 if (!maxblocks)
1448 return 0;
1449
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001450 map->m_len = 0;
1451 map->m_flags = 0;
1452
1453 /* it only supports block size == page size */
1454 pgofs = (pgoff_t)map->m_lblk;
Chao Yu46008c62016-05-09 19:56:30 +08001455 end = pgofs + maxblocks;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001456
Jaegeuk Kim24b84912016-02-03 13:49:44 -08001457 if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
Chao Yub0332a02020-02-14 17:44:12 +08001458 if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
Jia Zhuf4f0b672018-11-20 04:29:35 +08001459 map->m_may_create)
1460 goto next_dnode;
1461
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001462 map->m_pblk = ei.blk + pgofs - ei.fofs;
1463 map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
1464 map->m_flags = F2FS_MAP_MAPPED;
Chao Yuc4020b22018-01-11 14:42:30 +08001465 if (map->m_next_extent)
1466 *map->m_next_extent = pgofs + map->m_len;
Sahitya Tummala1e78e8b2018-10-10 10:56:22 +05301467
1468 /* for hardware encryption, but to avoid potential issues in the future */
1469 if (flag == F2FS_GET_BLOCK_DIO)
1470 f2fs_wait_on_block_writeback_range(inode,
1471 map->m_pblk, map->m_len);
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001472 goto out;
Chao Yua2e7d1b2015-02-05 17:50:30 +08001473 }
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001474
Chao Yu4fe71e82016-01-26 15:37:38 +08001475next_dnode:
Chao Yuf9d6d052018-11-13 14:33:45 +08001476 if (map->m_may_create)
Chao Yu0ef81832020-06-18 14:36:22 +08001477 f2fs_do_map_lock(sbi, flag, true);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001478
1479 /* When reading holes, we need the corresponding node page */
1480 set_new_dnode(&dn, inode, NULL, NULL, 0);
Chao Yu4d57b862018-05-30 00:20:41 +08001481 err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
Jaegeuk Kim1ec79082013-12-26 16:55:22 +09001482 if (err) {
Chao Yu43473f92016-05-05 19:13:02 +08001483 if (flag == F2FS_GET_BLOCK_BMAP)
1484 map->m_pblk = 0;
Chao Yuda859852016-01-26 15:42:58 +08001485 if (err == -ENOENT) {
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001486 err = 0;
Chao Yuda859852016-01-26 15:42:58 +08001487 if (map->m_next_pgofs)
1488 *map->m_next_pgofs =
Chao Yu4d57b862018-05-30 00:20:41 +08001489 f2fs_get_next_page_offset(&dn, pgofs);
Chao Yuc4020b22018-01-11 14:42:30 +08001490 if (map->m_next_extent)
1491 *map->m_next_extent =
Chao Yu4d57b862018-05-30 00:20:41 +08001492 f2fs_get_next_page_offset(&dn, pgofs);
Chao Yuda859852016-01-26 15:42:58 +08001493 }
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001494 goto unlock_out;
Namjae Jeon848753a2013-04-23 16:38:02 +09001495 }
Chao Yu973163f2015-09-18 16:51:51 +08001496
Chao Yuc4020b22018-01-11 14:42:30 +08001497 start_pgofs = pgofs;
Chao Yu46008c62016-05-09 19:56:30 +08001498 prealloc = 0;
Arnd Bergmann230436b2016-11-02 14:52:15 +01001499 last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
Chao Yu81ca7352016-01-26 15:39:35 +08001500 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001501
Chao Yu4fe71e82016-01-26 15:37:38 +08001502next_block:
Chao Yua2ced1c2020-02-14 17:44:10 +08001503 blkaddr = f2fs_data_blkaddr(&dn);
Chao Yu973163f2015-09-18 16:51:51 +08001504
Chao Yuc9b60782018-08-01 19:13:44 +08001505 if (__is_valid_data_blkaddr(blkaddr) &&
Chao Yu93770ab2019-04-15 15:26:32 +08001506 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
Chao Yu10f966b2019-06-20 11:36:14 +08001507 err = -EFSCORRUPTED;
Chao Yuc9b60782018-08-01 19:13:44 +08001508 goto sync_out;
1509 }
1510
Chao Yu93770ab2019-04-15 15:26:32 +08001511 if (__is_valid_data_blkaddr(blkaddr)) {
Chao Yuf847c692018-09-27 18:34:52 +08001512 /* use out-of-place update for direct IO under LFS mode */
Chao Yub0332a02020-02-14 17:44:12 +08001513 if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
Chao Yuf9d6d052018-11-13 14:33:45 +08001514 map->m_may_create) {
Chao Yuf847c692018-09-27 18:34:52 +08001515 err = __allocate_data_block(&dn, map->m_seg_type);
Chao Yu05e3600612019-08-28 17:33:36 +08001516 if (err)
1517 goto sync_out;
1518 blkaddr = dn.data_blkaddr;
1519 set_inode_flag(inode, FI_APPEND_WRITE);
Chao Yuf847c692018-09-27 18:34:52 +08001520 }
1521 } else {
Fan Li7df3a432015-12-17 13:20:59 +08001522 if (create) {
1523 if (unlikely(f2fs_cp_error(sbi))) {
1524 err = -EIO;
1525 goto sync_out;
Chao Yu973163f2015-09-18 16:51:51 +08001526 }
Jaegeuk Kim24b84912016-02-03 13:49:44 -08001527 if (flag == F2FS_GET_BLOCK_PRE_AIO) {
Chao Yu46008c62016-05-09 19:56:30 +08001528 if (blkaddr == NULL_ADDR) {
1529 prealloc++;
1530 last_ofs_in_node = dn.ofs_in_node;
1531 }
Jaegeuk Kim24b84912016-02-03 13:49:44 -08001532 } else {
Jaegeuk Kim0a4daae2018-09-19 15:28:40 -07001533 WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
1534 flag != F2FS_GET_BLOCK_DIO);
Hyunchul Leed5097be2017-11-28 09:23:00 +09001535 err = __allocate_data_block(&dn,
1536 map->m_seg_type);
Chao Yu6f2d8ed2016-10-11 22:57:03 +08001537 if (!err)
Jaegeuk Kim91942322016-05-20 10:13:22 -07001538 set_inode_flag(inode, FI_APPEND_WRITE);
Jaegeuk Kim24b84912016-02-03 13:49:44 -08001539 }
Fan Li7df3a432015-12-17 13:20:59 +08001540 if (err)
1541 goto sync_out;
Kinglong Mee3f2be042017-02-23 19:55:05 +08001542 map->m_flags |= F2FS_MAP_NEW;
Fan Li7df3a432015-12-17 13:20:59 +08001543 blkaddr = dn.data_blkaddr;
1544 } else {
Chao Yu43473f92016-05-05 19:13:02 +08001545 if (flag == F2FS_GET_BLOCK_BMAP) {
1546 map->m_pblk = 0;
1547 goto sync_out;
1548 }
Chao Yuc4020b22018-01-11 14:42:30 +08001549 if (flag == F2FS_GET_BLOCK_PRECACHE)
1550 goto sync_out;
Chao Yuda859852016-01-26 15:42:58 +08001551 if (flag == F2FS_GET_BLOCK_FIEMAP &&
1552 blkaddr == NULL_ADDR) {
1553 if (map->m_next_pgofs)
1554 *map->m_next_pgofs = pgofs + 1;
Fan Li7df3a432015-12-17 13:20:59 +08001555 goto sync_out;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001556 }
Chao Yuf3d98e72018-01-10 18:18:52 +08001557 if (flag != F2FS_GET_BLOCK_FIEMAP) {
1558 /* for defragment case */
1559 if (map->m_next_pgofs)
1560 *map->m_next_pgofs = pgofs + 1;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001561 goto sync_out;
Chao Yuf3d98e72018-01-10 18:18:52 +08001562 }
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001563 }
1564 }
Fan Li7df3a432015-12-17 13:20:59 +08001565
Chao Yu46008c62016-05-09 19:56:30 +08001566 if (flag == F2FS_GET_BLOCK_PRE_AIO)
1567 goto skip;
1568
Chao Yu4fe71e82016-01-26 15:37:38 +08001569 if (map->m_len == 0) {
1570 /* preallocated unwritten block should be mapped for fiemap. */
1571 if (blkaddr == NEW_ADDR)
1572 map->m_flags |= F2FS_MAP_UNWRITTEN;
1573 map->m_flags |= F2FS_MAP_MAPPED;
1574
1575 map->m_pblk = blkaddr;
1576 map->m_len = 1;
1577 } else if ((map->m_pblk != NEW_ADDR &&
Fan Li7df3a432015-12-17 13:20:59 +08001578 blkaddr == (map->m_pblk + ofs)) ||
Jaegeuk Kimb439b102016-02-03 13:09:09 -08001579 (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
Chao Yu46008c62016-05-09 19:56:30 +08001580 flag == F2FS_GET_BLOCK_PRE_DIO) {
Fan Li7df3a432015-12-17 13:20:59 +08001581 ofs++;
Fan Li7df3a432015-12-17 13:20:59 +08001582 map->m_len++;
Chao Yu4fe71e82016-01-26 15:37:38 +08001583 } else {
1584 goto sync_out;
1585 }
1586
Chao Yu46008c62016-05-09 19:56:30 +08001587skip:
Chao Yu4fe71e82016-01-26 15:37:38 +08001588 dn.ofs_in_node++;
1589 pgofs++;
1590
Chao Yu46008c62016-05-09 19:56:30 +08001591 /* preallocate blocks in batch for one dnode page */
1592 if (flag == F2FS_GET_BLOCK_PRE_AIO &&
1593 (pgofs == end || dn.ofs_in_node == end_offset)) {
Chao Yu4fe71e82016-01-26 15:37:38 +08001594
Chao Yu46008c62016-05-09 19:56:30 +08001595 dn.ofs_in_node = ofs_in_node;
Chao Yu4d57b862018-05-30 00:20:41 +08001596 err = f2fs_reserve_new_blocks(&dn, prealloc);
Chao Yu46008c62016-05-09 19:56:30 +08001597 if (err)
1598 goto sync_out;
Chao Yu4fe71e82016-01-26 15:37:38 +08001599
Chao Yu46008c62016-05-09 19:56:30 +08001600 map->m_len += dn.ofs_in_node - ofs_in_node;
1601 if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
1602 err = -ENOSPC;
1603 goto sync_out;
Chao Yu4fe71e82016-01-26 15:37:38 +08001604 }
Chao Yu46008c62016-05-09 19:56:30 +08001605 dn.ofs_in_node = end_offset;
Fan Li7df3a432015-12-17 13:20:59 +08001606 }
1607
Chao Yu46008c62016-05-09 19:56:30 +08001608 if (pgofs >= end)
1609 goto sync_out;
1610 else if (dn.ofs_in_node < end_offset)
1611 goto next_block;
1612
Chao Yuc4020b22018-01-11 14:42:30 +08001613 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1614 if (map->m_flags & F2FS_MAP_MAPPED) {
1615 unsigned int ofs = start_pgofs - map->m_lblk;
1616
1617 f2fs_update_extent_cache_range(&dn,
1618 start_pgofs, map->m_pblk + ofs,
1619 map->m_len - ofs);
1620 }
1621 }
1622
Chao Yu46008c62016-05-09 19:56:30 +08001623 f2fs_put_dnode(&dn);
1624
Chao Yuf9d6d052018-11-13 14:33:45 +08001625 if (map->m_may_create) {
Chao Yu0ef81832020-06-18 14:36:22 +08001626 f2fs_do_map_lock(sbi, flag, false);
Chao Yu6f2d8ed2016-10-11 22:57:03 +08001627 f2fs_balance_fs(sbi, dn.node_changed);
Chao Yu46008c62016-05-09 19:56:30 +08001628 }
Chao Yu46008c62016-05-09 19:56:30 +08001629 goto next_dnode;
1630
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001631sync_out:
Sahitya Tummala1e78e8b2018-10-10 10:56:22 +05301632
1633 /* for hardware encryption, but to avoid potential issues in the future */
1634 if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
1635 f2fs_wait_on_block_writeback_range(inode,
1636 map->m_pblk, map->m_len);
1637
Chao Yuc4020b22018-01-11 14:42:30 +08001638 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1639 if (map->m_flags & F2FS_MAP_MAPPED) {
1640 unsigned int ofs = start_pgofs - map->m_lblk;
1641
1642 f2fs_update_extent_cache_range(&dn,
1643 start_pgofs, map->m_pblk + ofs,
1644 map->m_len - ofs);
1645 }
1646 if (map->m_next_extent)
1647 *map->m_next_extent = pgofs + 1;
1648 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001649 f2fs_put_dnode(&dn);
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001650unlock_out:
Chao Yuf9d6d052018-11-13 14:33:45 +08001651 if (map->m_may_create) {
Chao Yu0ef81832020-06-18 14:36:22 +08001652 f2fs_do_map_lock(sbi, flag, false);
Chao Yu6f2d8ed2016-10-11 22:57:03 +08001653 f2fs_balance_fs(sbi, dn.node_changed);
Jaegeuk Kim2a340762015-12-22 13:23:35 -08001654 }
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001655out:
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001656 trace_f2fs_map_blocks(inode, map, err);
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001657 return err;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001658}
1659
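/*
 * Illustrative sketch (hypothetical helper, not part of f2fs): how a
 * read-only caller could resolve a single logical block through
 * f2fs_map_blocks(); f2fs_overwrite_io() below is a real in-tree user of
 * the same pattern.
 */
static inline int f2fs_demo_lookup_pblk(struct inode *inode, pgoff_t blkno,
					block_t *pblk)
{
	struct f2fs_map_blocks map = {
		.m_lblk = blkno,
		.m_len = 1,
		.m_seg_type = NO_CHECK_TYPE,
		.m_may_create = false,
	};
	int err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);

	if (err)
		return err;
	if (!(map.m_flags & F2FS_MAP_MAPPED))
		return -ENOENT;	/* hole or unmapped range */
	*pblk = map.m_pblk;
	return 0;
}
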
Hyunchul Leeb91050a2018-03-08 19:34:38 +09001660bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1661{
1662 struct f2fs_map_blocks map;
1663 block_t last_lblk;
1664 int err;
1665
1666 if (pos + len > i_size_read(inode))
1667 return false;
1668
1669 map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1670 map.m_next_pgofs = NULL;
1671 map.m_next_extent = NULL;
1672 map.m_seg_type = NO_CHECK_TYPE;
Jia Zhuf4f0b672018-11-20 04:29:35 +08001673 map.m_may_create = false;
Hyunchul Leeb91050a2018-03-08 19:34:38 +09001674 last_lblk = F2FS_BLK_ALIGN(pos + len);
1675
1676 while (map.m_lblk < last_lblk) {
1677 map.m_len = last_lblk - map.m_lblk;
1678 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
1679 if (err || map.m_len == 0)
1680 return false;
1681 map.m_lblk += map.m_len;
1682 }
1683 return true;
1684}
1685
Jaegeuk Kim43b9d4b2020-11-24 15:04:27 -08001686static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
1687{
1688 return (bytes >> inode->i_blkbits);
1689}
1690
1691static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
1692{
1693 return (blks << inode->i_blkbits);
1694}
1695
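/*
 * Worked example for the two helpers above, assuming the common 4KB block
 * size (inode->i_blkbits == 12):
 *
 *	bytes_to_blks(inode, 8192) == 2
 *	blks_to_bytes(inode, 2)    == 8192
 *
 * bytes_to_blks() truncates, so a byte count that is not block-aligned
 * loses its remainder; callers round up by hand where that matters.
 */
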
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001696static int __get_data_block(struct inode *inode, sector_t iblock,
Chao Yuda859852016-01-26 15:42:58 +08001697 struct buffer_head *bh, int create, int flag,
Chao Yuf9d6d052018-11-13 14:33:45 +08001698 pgoff_t *next_pgofs, int seg_type, bool may_write)
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001699{
1700 struct f2fs_map_blocks map;
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001701 int err;
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001702
1703 map.m_lblk = iblock;
Jaegeuk Kim43b9d4b2020-11-24 15:04:27 -08001704 map.m_len = bytes_to_blks(inode, bh->b_size);
Chao Yuda859852016-01-26 15:42:58 +08001705 map.m_next_pgofs = next_pgofs;
Chao Yuc4020b22018-01-11 14:42:30 +08001706 map.m_next_extent = NULL;
Hyunchul Leed5097be2017-11-28 09:23:00 +09001707 map.m_seg_type = seg_type;
Chao Yuf9d6d052018-11-13 14:33:45 +08001708 map.m_may_create = may_write;
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001709
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001710 err = f2fs_map_blocks(inode, &map, create, flag);
1711 if (!err) {
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001712 map_bh(bh, inode->i_sb, map.m_pblk);
1713 bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
Jaegeuk Kim43b9d4b2020-11-24 15:04:27 -08001714 bh->b_size = blks_to_bytes(inode, map.m_len);
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001715 }
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001716 return err;
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001717}
1718
Chao Yuf9d6d052018-11-13 14:33:45 +08001719static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
1720 struct buffer_head *bh_result, int create)
1721{
1722 return __get_data_block(inode, iblock, bh_result, create,
1723 F2FS_GET_BLOCK_DIO, NULL,
1724 f2fs_rw_hint_to_seg_type(inode->i_write_hint),
Jaegeuk Kim75a037f2019-07-31 13:27:05 -07001725 IS_SWAPFILE(inode) ? false : true);
Jaegeuk Kimccfb3002014-06-13 13:02:11 +09001726}
1727
Chao Yue2b4e2b2015-08-19 19:11:19 +08001728static int get_data_block_dio(struct inode *inode, sector_t iblock,
Jaegeuk Kimccfb3002014-06-13 13:02:11 +09001729 struct buffer_head *bh_result, int create)
1730{
Chao Yue2b4e2b2015-08-19 19:11:19 +08001731 return __get_data_block(inode, iblock, bh_result, create,
Chao Yuf9d6d052018-11-13 14:33:45 +08001732 F2FS_GET_BLOCK_DIO, NULL,
1733 f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1734 false);
Chao Yue2b4e2b2015-08-19 19:11:19 +08001735}
1736
Chao Yu442a9db2018-01-11 14:39:57 +08001737static int f2fs_xattr_fiemap(struct inode *inode,
1738 struct fiemap_extent_info *fieinfo)
1739{
1740 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1741 struct page *page;
1742 struct node_info ni;
1743 __u64 phys = 0, len;
1744 __u32 flags;
1745 nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1746 int err = 0;
1747
1748 if (f2fs_has_inline_xattr(inode)) {
1749 int offset;
1750
1751 page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1752 inode->i_ino, false);
1753 if (!page)
1754 return -ENOMEM;
1755
Chao Yu77357302018-07-17 00:02:17 +08001756 err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
1757 if (err) {
1758 f2fs_put_page(page, 1);
1759 return err;
1760 }
Chao Yu442a9db2018-01-11 14:39:57 +08001761
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08001762 phys = blks_to_bytes(inode, ni.blk_addr);
Chao Yu442a9db2018-01-11 14:39:57 +08001763 offset = offsetof(struct f2fs_inode, i_addr) +
1764 sizeof(__le32) * (DEF_ADDRS_PER_INODE -
Chao Yub323fd22018-01-17 16:31:36 +08001765 get_inline_xattr_addrs(inode));
Chao Yu442a9db2018-01-11 14:39:57 +08001766
1767 phys += offset;
1768 len = inline_xattr_size(inode);
1769
1770 f2fs_put_page(page, 1);
1771
1772 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1773
1774 if (!xnid)
1775 flags |= FIEMAP_EXTENT_LAST;
1776
1777 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
Chao Yudd5a09b2020-06-29 20:13:13 +08001778 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
Chao Yu442a9db2018-01-11 14:39:57 +08001779 if (err || err == 1)
1780 return err;
1781 }
1782
1783 if (xnid) {
1784 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1785 if (!page)
1786 return -ENOMEM;
1787
Chao Yu77357302018-07-17 00:02:17 +08001788 err = f2fs_get_node_info(sbi, xnid, &ni);
1789 if (err) {
1790 f2fs_put_page(page, 1);
1791 return err;
1792 }
Chao Yu442a9db2018-01-11 14:39:57 +08001793
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08001794 phys = blks_to_bytes(inode, ni.blk_addr);
Chao Yu442a9db2018-01-11 14:39:57 +08001795 len = inode->i_sb->s_blocksize;
1796
1797 f2fs_put_page(page, 1);
1798
1799 flags = FIEMAP_EXTENT_LAST;
1800 }
1801
Chao Yudd5a09b2020-06-29 20:13:13 +08001802 if (phys) {
Chao Yu442a9db2018-01-11 14:39:57 +08001803 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
Chao Yudd5a09b2020-06-29 20:13:13 +08001804 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1805 }
Chao Yu442a9db2018-01-11 14:39:57 +08001806
1807 return (err < 0 ? err : 0);
1808}
1809
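/*
 * The inline-xattr offset math used above, spelled out (illustrative):
 * the inline xattr area occupies the last get_inline_xattr_addrs(inode)
 * slots of the on-disk i_addr[] array, so its byte offset within the node
 * block is
 *
 *	offsetof(struct f2fs_inode, i_addr)
 *		+ sizeof(__le32) * (DEF_ADDRS_PER_INODE - nr_inline_slots)
 *
 * where nr_inline_slots stands in for get_inline_xattr_addrs(inode).
 */
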
Chao Yubf38fba2020-03-28 17:40:40 +08001810static loff_t max_inode_blocks(struct inode *inode)
1811{
1812 loff_t result = ADDRS_PER_INODE(inode);
1813 loff_t leaf_count = ADDRS_PER_BLOCK(inode);
1814
1815 /* two direct node blocks */
1816 result += (leaf_count * 2);
1817
1818 /* two indirect node blocks */
1819 leaf_count *= NIDS_PER_BLOCK;
1820 result += (leaf_count * 2);
1821
1822 /* one double indirect node block */
1823 leaf_count *= NIDS_PER_BLOCK;
1824 result += leaf_count;
1825
1826 return result;
1827}
1828
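/*
 * Worked example (assuming the default layout, where ADDRS_PER_INODE is
 * 923 and both ADDRS_PER_BLOCK and NIDS_PER_BLOCK are 1018):
 *
 *	923 + 2 * 1018 + 2 * 1018^2 + 1018^3 ~= 1.06 billion blocks,
 *
 * i.e. roughly 4TB with 4KB blocks, which bounds how far the fiemap code
 * below chases holes in a sparse file.
 */
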
Jaegeuk Kim9ab701342014-06-08 04:30:14 +09001829int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1830 u64 start, u64 len)
1831{
Jaegeuk Kimb876f4c2020-11-24 15:19:10 -08001832 struct f2fs_map_blocks map;
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001833 sector_t start_blk, last_blk;
Chao Yuda859852016-01-26 15:42:58 +08001834 pgoff_t next_pgofs;
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001835 u64 logical = 0, phys = 0, size = 0;
1836 u32 flags = 0;
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001837 int ret = 0;
Chao Yubf38fba2020-03-28 17:40:40 +08001838 bool compr_cluster = false;
1839 unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001840
Chao Yuc4020b22018-01-11 14:42:30 +08001841 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1842 ret = f2fs_precache_extents(inode);
1843 if (ret)
1844 return ret;
1845 }
1846
Christoph Hellwig45dd0522020-05-23 09:30:14 +02001847 ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001848 if (ret)
1849 return ret;
1850
Chao Yuf1b43d42018-01-11 14:37:35 +08001851 inode_lock(inode);
1852
Chao Yu442a9db2018-01-11 14:39:57 +08001853 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1854 ret = f2fs_xattr_fiemap(inode, fieinfo);
1855 goto out;
1856 }
1857
Chao Yu7975f342019-07-22 18:03:50 +08001858 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
Jaegeuk Kim67f8cf32015-10-15 11:34:49 -07001859 ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1860 if (ret != -EAGAIN)
Chao Yuf1b43d42018-01-11 14:37:35 +08001861 goto out;
Jaegeuk Kim67f8cf32015-10-15 11:34:49 -07001862 }
1863
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08001864 if (bytes_to_blks(inode, len) == 0)
1865 len = blks_to_bytes(inode, 1);
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001866
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08001867 start_blk = bytes_to_blks(inode, start);
1868 last_blk = bytes_to_blks(inode, start + len - 1);
Fan Li9a950d52015-12-26 18:07:41 +08001869
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001870next:
Jaegeuk Kimb876f4c2020-11-24 15:19:10 -08001871 memset(&map, 0, sizeof(map));
1872 map.m_lblk = start_blk;
1873 map.m_len = bytes_to_blks(inode, len);
1874 map.m_next_pgofs = &next_pgofs;
1875 map.m_seg_type = NO_CHECK_TYPE;
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001876
Chao Yubf38fba2020-03-28 17:40:40 +08001877 if (compr_cluster)
Jaegeuk Kimb876f4c2020-11-24 15:19:10 -08001878 map.m_len = cluster_size - 1;
Chao Yubf38fba2020-03-28 17:40:40 +08001879
Jaegeuk Kimb876f4c2020-11-24 15:19:10 -08001880 ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001881 if (ret)
1882 goto out;
1883
1884 /* HOLE */
Jaegeuk Kimb876f4c2020-11-24 15:19:10 -08001885 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
Chao Yuda859852016-01-26 15:42:58 +08001886 start_blk = next_pgofs;
Chao Yu58736fa2016-10-11 22:57:04 +08001887
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08001888 if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
Chao Yubf38fba2020-03-28 17:40:40 +08001889 max_inode_blocks(inode)))
Fan Li9a950d52015-12-26 18:07:41 +08001890 goto prep_next;
Chao Yu58736fa2016-10-11 22:57:04 +08001891
Fan Li9a950d52015-12-26 18:07:41 +08001892 flags |= FIEMAP_EXTENT_LAST;
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001893 }
Fan Li9a950d52015-12-26 18:07:41 +08001894
Chao Yuda5af122016-01-08 20:19:27 +08001895 if (size) {
Chao Yu0953fe82020-12-14 17:20:57 +08001896 flags |= FIEMAP_EXTENT_MERGED;
Chandan Rajendra62230e0d2018-12-12 15:20:11 +05301897 if (IS_ENCRYPTED(inode))
Chao Yuda5af122016-01-08 20:19:27 +08001898 flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
1899
Fan Li9a950d52015-12-26 18:07:41 +08001900 ret = fiemap_fill_next_extent(fieinfo, logical,
1901 phys, size, flags);
Chao Yudd5a09b2020-06-29 20:13:13 +08001902 trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
Chao Yubf38fba2020-03-28 17:40:40 +08001903 if (ret)
1904 goto out;
1905 size = 0;
Chao Yuda5af122016-01-08 20:19:27 +08001906 }
Fan Li9a950d52015-12-26 18:07:41 +08001907
Chao Yubf38fba2020-03-28 17:40:40 +08001908 if (start_blk > last_blk)
Fan Li9a950d52015-12-26 18:07:41 +08001909 goto out;
1910
Chao Yubf38fba2020-03-28 17:40:40 +08001911 if (compr_cluster) {
1912 compr_cluster = false;
1913
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08001915 logical = blks_to_bytes(inode, start_blk - 1);
Jaegeuk Kimb876f4c2020-11-24 15:19:10 -08001916 phys = blks_to_bytes(inode, map.m_pblk);
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08001917 size = blks_to_bytes(inode, cluster_size);
Chao Yubf38fba2020-03-28 17:40:40 +08001918
1919 flags |= FIEMAP_EXTENT_ENCODED;
1920
1921 start_blk += cluster_size - 1;
1922
1923 if (start_blk > last_blk)
1924 goto out;
1925
1926 goto prep_next;
1927 }
1928
Jaegeuk Kimb876f4c2020-11-24 15:19:10 -08001929 if (map.m_pblk == COMPRESS_ADDR) {
Chao Yubf38fba2020-03-28 17:40:40 +08001930 compr_cluster = true;
1931 start_blk++;
1932 goto prep_next;
1933 }
1934
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08001935 logical = blks_to_bytes(inode, start_blk);
Jaegeuk Kimb876f4c2020-11-24 15:19:10 -08001936 phys = blks_to_bytes(inode, map.m_pblk);
1937 size = blks_to_bytes(inode, map.m_len);
Fan Li9a950d52015-12-26 18:07:41 +08001938 flags = 0;
Jaegeuk Kimb876f4c2020-11-24 15:19:10 -08001939 if (map.m_flags & F2FS_MAP_UNWRITTEN)
Fan Li9a950d52015-12-26 18:07:41 +08001940 flags = FIEMAP_EXTENT_UNWRITTEN;
1941
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08001942 start_blk += bytes_to_blks(inode, size);
Fan Li9a950d52015-12-26 18:07:41 +08001943
1944prep_next:
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001945 cond_resched();
1946 if (fatal_signal_pending(current))
1947 ret = -EINTR;
1948 else
1949 goto next;
1950out:
1951 if (ret == 1)
1952 ret = 0;
1953
Al Viro59551022016-01-22 15:40:57 -05001954 inode_unlock(inode);
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001955 return ret;
Jaegeuk Kim9ab701342014-06-08 04:30:14 +09001956}
1957
Eric Biggers95ae2512019-07-22 09:26:24 -07001958static inline loff_t f2fs_readpage_limit(struct inode *inode)
1959{
1960 if (IS_ENABLED(CONFIG_FS_VERITY) &&
1961 (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
1962 return inode->i_sb->s_maxbytes;
1963
1964 return i_size_read(inode);
1965}
1966
Chao Yu2df0ab02019-03-25 21:07:30 +08001967static int f2fs_read_single_page(struct inode *inode, struct page *page,
1968 unsigned nr_pages,
1969 struct f2fs_map_blocks *map,
1970 struct bio **bio_ret,
1971 sector_t *last_block_in_bio,
1972 bool is_readahead)
1973{
1974 struct bio *bio = *bio_ret;
Jaegeuk Kim43b9d4b2020-11-24 15:04:27 -08001975 const unsigned blocksize = blks_to_bytes(inode, 1);
Chao Yu2df0ab02019-03-25 21:07:30 +08001976 sector_t block_in_file;
1977 sector_t last_block;
1978 sector_t last_block_in_file;
1979 sector_t block_nr;
1980 int ret = 0;
1981
Jaegeuk Kim4969c062019-07-01 19:15:29 -07001982 block_in_file = (sector_t)page_index(page);
Chao Yu2df0ab02019-03-25 21:07:30 +08001983 last_block = block_in_file + nr_pages;
Jaegeuk Kim43b9d4b2020-11-24 15:04:27 -08001984 last_block_in_file = bytes_to_blks(inode,
1985 f2fs_readpage_limit(inode) + blocksize - 1);
Chao Yu2df0ab02019-03-25 21:07:30 +08001986 if (last_block > last_block_in_file)
1987 last_block = last_block_in_file;
1988
1989 /* just zeroing out page which is beyond EOF */
1990 if (block_in_file >= last_block)
1991 goto zero_out;
1992 /*
1993 * Map blocks using the previous result first.
1994 */
1995 if ((map->m_flags & F2FS_MAP_MAPPED) &&
1996 block_in_file > map->m_lblk &&
1997 block_in_file < (map->m_lblk + map->m_len))
1998 goto got_it;
1999
2000 /*
2001 * Then do more f2fs_map_blocks() calls until we are
2002 * done with this page.
2003 */
2004 map->m_lblk = block_in_file;
2005 map->m_len = last_block - block_in_file;
2006
2007 ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
2008 if (ret)
2009 goto out;
2010got_it:
2011 if ((map->m_flags & F2FS_MAP_MAPPED)) {
2012 block_nr = map->m_pblk + block_in_file - map->m_lblk;
2013 SetPageMappedToDisk(page);
2014
Jaegeuk Kim4969c062019-07-01 19:15:29 -07002015 if (!PageUptodate(page) && (!PageSwapCache(page) &&
2016 !cleancache_get_page(page))) {
Chao Yu2df0ab02019-03-25 21:07:30 +08002017 SetPageUptodate(page);
2018 goto confused;
2019 }
2020
2021 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
Chao Yu93770ab2019-04-15 15:26:32 +08002022 DATA_GENERIC_ENHANCE_READ)) {
Chao Yu10f966b2019-06-20 11:36:14 +08002023 ret = -EFSCORRUPTED;
Chao Yu2df0ab02019-03-25 21:07:30 +08002024 goto out;
2025 }
2026 } else {
2027zero_out:
2028 zero_user_segment(page, 0, PAGE_SIZE);
Eric Biggers95ae2512019-07-22 09:26:24 -07002029 if (f2fs_need_verity(inode, page->index) &&
2030 !fsverity_verify_page(page)) {
2031 ret = -EIO;
2032 goto out;
2033 }
Chao Yu2df0ab02019-03-25 21:07:30 +08002034 if (!PageUptodate(page))
2035 SetPageUptodate(page);
2036 unlock_page(page);
2037 goto out;
2038 }
2039
2040 /*
2041 * This page will go to BIO. Do we need to send this
2042 * BIO off first?
2043 */
Satya Tangirala27aacd22020-07-02 01:56:06 +00002044 if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2045 *last_block_in_bio, block_nr) ||
2046 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
Chao Yu2df0ab02019-03-25 21:07:30 +08002047submit_and_realloc:
2048 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2049 bio = NULL;
2050 }
2051 if (bio == NULL) {
2052 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
Chao Yu06837282020-02-18 18:21:35 +08002053 is_readahead ? REQ_RAHEAD : 0, page->index,
Eric Biggers7f59b272021-01-04 22:33:02 -08002054 false);
Chao Yu2df0ab02019-03-25 21:07:30 +08002055 if (IS_ERR(bio)) {
2056 ret = PTR_ERR(bio);
2057 bio = NULL;
2058 goto out;
2059 }
2060 }
2061
2062 /*
2063 * If the page is under writeback, we need to wait for
2064 * its completion to see the correct decrypted data.
2065 */
2066 f2fs_wait_on_block_writeback(inode, block_nr);
2067
2068 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2069 goto submit_and_realloc;
2070
2071 inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
Chao Yu8b83ac82020-04-16 18:16:56 +08002072 f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE);
Chao Yu2df0ab02019-03-25 21:07:30 +08002073 ClearPageError(page);
2074 *last_block_in_bio = block_nr;
2075 goto out;
2076confused:
2077 if (bio) {
2078 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2079 bio = NULL;
2080 }
2081 unlock_page(page);
2082out:
2083 *bio_ret = bio;
2084 return ret;
2085}
2086
Chao Yu4c8ff702019-11-01 18:07:14 +08002087#ifdef CONFIG_F2FS_FS_COMPRESSION
2088int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2089 unsigned nr_pages, sector_t *last_block_in_bio,
Chao Yu06837282020-02-18 18:21:35 +08002090 bool is_readahead, bool for_write)
Chao Yu4c8ff702019-11-01 18:07:14 +08002091{
2092 struct dnode_of_data dn;
2093 struct inode *inode = cc->inode;
2094 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2095 struct bio *bio = *bio_ret;
2096 unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
2097 sector_t last_block_in_file;
Jaegeuk Kim43b9d4b2020-11-24 15:04:27 -08002098 const unsigned blocksize = blks_to_bytes(inode, 1);
Chao Yu4c8ff702019-11-01 18:07:14 +08002099 struct decompress_io_ctx *dic = NULL;
2100 int i;
2101 int ret = 0;
2102
2103 f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
2104
Jaegeuk Kim43b9d4b2020-11-24 15:04:27 -08002105 last_block_in_file = bytes_to_blks(inode,
2106 f2fs_readpage_limit(inode) + blocksize - 1);
Chao Yu4c8ff702019-11-01 18:07:14 +08002107
2108 /* get rid of pages beyond EOF */
2109 for (i = 0; i < cc->cluster_size; i++) {
2110 struct page *page = cc->rpages[i];
2111
2112 if (!page)
2113 continue;
2114 if ((sector_t)page->index >= last_block_in_file) {
2115 zero_user_segment(page, 0, PAGE_SIZE);
2116 if (!PageUptodate(page))
2117 SetPageUptodate(page);
2118 } else if (!PageUptodate(page)) {
2119 continue;
2120 }
2121 unlock_page(page);
2122 cc->rpages[i] = NULL;
2123 cc->nr_rpages--;
2124 }
2125
2126 /* we are done since all pages are beyond EOF */
2127 if (f2fs_cluster_is_empty(cc))
2128 goto out;
2129
2130 set_new_dnode(&dn, inode, NULL, NULL, 0);
2131 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
2132 if (ret)
2133 goto out;
2134
Chao Yua86d27d2020-07-29 21:21:35 +08002135 f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
Chao Yu4c8ff702019-11-01 18:07:14 +08002136
2137 for (i = 1; i < cc->cluster_size; i++) {
2138 block_t blkaddr;
2139
Chao Yua2ced1c2020-02-14 17:44:10 +08002140 blkaddr = data_blkaddr(dn.inode, dn.node_page,
Chao Yu4c8ff702019-11-01 18:07:14 +08002141 dn.ofs_in_node + i);
2142
2143 if (!__is_valid_data_blkaddr(blkaddr))
2144 break;
2145
2146 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
2147 ret = -EFAULT;
2148 goto out_put_dnode;
2149 }
2150 cc->nr_cpages++;
2151 }
2152
2153 /* nothing to decompress */
2154 if (cc->nr_cpages == 0) {
2155 ret = 0;
2156 goto out_put_dnode;
2157 }
2158
2159 dic = f2fs_alloc_dic(cc);
2160 if (IS_ERR(dic)) {
2161 ret = PTR_ERR(dic);
2162 goto out_put_dnode;
2163 }
2164
2165 for (i = 0; i < dic->nr_cpages; i++) {
2166 struct page *page = dic->cpages[i];
2167 block_t blkaddr;
Eric Biggers7f59b272021-01-04 22:33:02 -08002168 struct bio_post_read_ctx *ctx;
Chao Yu4c8ff702019-11-01 18:07:14 +08002169
Chao Yua2ced1c2020-02-14 17:44:10 +08002170 blkaddr = data_blkaddr(dn.inode, dn.node_page,
Chao Yu4c8ff702019-11-01 18:07:14 +08002171 dn.ofs_in_node + i + 1);
2172
Satya Tangirala27aacd22020-07-02 01:56:06 +00002173 if (bio && (!page_is_mergeable(sbi, bio,
2174 *last_block_in_bio, blkaddr) ||
2175 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
Chao Yu4c8ff702019-11-01 18:07:14 +08002176submit_and_realloc:
2177 __submit_bio(sbi, bio, DATA);
2178 bio = NULL;
2179 }
2180
2181 if (!bio) {
2182 bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2183 is_readahead ? REQ_RAHEAD : 0,
Eric Biggers7f59b272021-01-04 22:33:02 -08002184 page->index, for_write);
Chao Yu4c8ff702019-11-01 18:07:14 +08002185 if (IS_ERR(bio)) {
2186 ret = PTR_ERR(bio);
Eric Biggers7f59b272021-01-04 22:33:02 -08002187 f2fs_decompress_end_io(dic, ret);
Chao Yu4c8ff702019-11-01 18:07:14 +08002188 f2fs_put_dnode(&dn);
Chao Yuf3494342020-04-23 17:57:33 +08002189 *bio_ret = NULL;
Chao Yu4c8ff702019-11-01 18:07:14 +08002190 return ret;
2191 }
2192 }
2193
2194 f2fs_wait_on_block_writeback(inode, blkaddr);
2195
2196 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2197 goto submit_and_realloc;
2198
Chao Yu03382f12020-04-21 19:36:21 +08002199 ctx = bio->bi_private;
Eric Biggers7f59b272021-01-04 22:33:02 -08002200 ctx->enabled_steps |= STEP_DECOMPRESS;
2201 refcount_inc(&dic->refcnt);
Chao Yu03382f12020-04-21 19:36:21 +08002202
Chao Yu4c8ff702019-11-01 18:07:14 +08002203 inc_page_count(sbi, F2FS_RD_DATA);
Chao Yu8b83ac82020-04-16 18:16:56 +08002204 f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
Chao Yu9c122382020-04-23 18:03:06 +08002205 f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
Chao Yu4c8ff702019-11-01 18:07:14 +08002206 ClearPageError(page);
2207 *last_block_in_bio = blkaddr;
2208 }
2209
2210 f2fs_put_dnode(&dn);
2211
2212 *bio_ret = bio;
2213 return 0;
2214
2215out_put_dnode:
2216 f2fs_put_dnode(&dn);
2217out:
Eric Biggers7f59b272021-01-04 22:33:02 -08002218 for (i = 0; i < cc->cluster_size; i++) {
2219 if (cc->rpages[i]) {
2220 ClearPageUptodate(cc->rpages[i]);
2221 ClearPageError(cc->rpages[i]);
2222 unlock_page(cc->rpages[i]);
2223 }
2224 }
Chao Yu4c8ff702019-11-01 18:07:14 +08002225 *bio_ret = bio;
2226 return ret;
2227}
2228#endif
2229
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002230/*
2231 * This function was originally taken from fs/mpage.c, and customized for f2fs.
2232 * The major change is that block_size == page_size in f2fs by default.
2233 */
Matthew Wilcox (Oracle)e20a7692020-06-01 21:47:27 -07002234static int f2fs_mpage_readpages(struct inode *inode,
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002235 struct readahead_control *rac, struct page *page)
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002236{
2237 struct bio *bio = NULL;
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002238 sector_t last_block_in_bio = 0;
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002239 struct f2fs_map_blocks map;
Chao Yu4c8ff702019-11-01 18:07:14 +08002240#ifdef CONFIG_F2FS_FS_COMPRESSION
2241 struct compress_ctx cc = {
2242 .inode = inode,
2243 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2244 .cluster_size = F2FS_I(inode)->i_cluster_size,
2245 .cluster_idx = NULL_CLUSTER,
2246 .rpages = NULL,
2247 .cpages = NULL,
2248 .nr_rpages = 0,
2249 .nr_cpages = 0,
2250 };
2251#endif
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002252 unsigned nr_pages = rac ? readahead_count(rac) : 1;
Chao Yu4c8ff702019-11-01 18:07:14 +08002253 unsigned max_nr_pages = nr_pages;
Chao Yu2df0ab02019-03-25 21:07:30 +08002254 int ret = 0;
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002255
2256 map.m_pblk = 0;
2257 map.m_lblk = 0;
2258 map.m_len = 0;
2259 map.m_flags = 0;
Chao Yuda859852016-01-26 15:42:58 +08002260 map.m_next_pgofs = NULL;
Chao Yuc4020b22018-01-11 14:42:30 +08002261 map.m_next_extent = NULL;
Hyunchul Leed5097be2017-11-28 09:23:00 +09002262 map.m_seg_type = NO_CHECK_TYPE;
Chao Yuf9d6d052018-11-13 14:33:45 +08002263 map.m_may_create = false;
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002264
LiFan736c0a72017-11-25 11:46:18 +08002265 for (; nr_pages; nr_pages--) {
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002266 if (rac) {
2267 page = readahead_page(rac);
Kinglong Meea83d50b2017-03-13 16:35:13 +08002268 prefetchw(&page->flags);
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002269 }
2270
Chao Yu4c8ff702019-11-01 18:07:14 +08002271#ifdef CONFIG_F2FS_FS_COMPRESSION
2272 if (f2fs_compressed_file(inode)) {
2273 /* there are remaining compressed pages, submit them */
2274 if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
2275 ret = f2fs_read_multi_pages(&cc, &bio,
2276 max_nr_pages,
2277 &last_block_in_bio,
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002278 rac != NULL, false);
Chao Yu4c8ff702019-11-01 18:07:14 +08002279 f2fs_destroy_compress_ctx(&cc);
2280 if (ret)
2281 goto set_error_page;
2282 }
2283 ret = f2fs_is_compressed_cluster(inode, page->index);
2284 if (ret < 0)
2285 goto set_error_page;
2286 else if (!ret)
2287 goto read_single_page;
2288
2289 ret = f2fs_init_compress_ctx(&cc);
2290 if (ret)
2291 goto set_error_page;
2292
2293 f2fs_compress_ctx_add_page(&cc, page);
2294
2295 goto next_page;
2296 }
2297read_single_page:
2298#endif
2299
2300 ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002301 &bio, &last_block_in_bio, rac);
Chao Yu2df0ab02019-03-25 21:07:30 +08002302 if (ret) {
Chao Yu4c8ff702019-11-01 18:07:14 +08002303#ifdef CONFIG_F2FS_FS_COMPRESSION
2304set_error_page:
2305#endif
Chao Yu2df0ab02019-03-25 21:07:30 +08002306 SetPageError(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002307 zero_user_segment(page, 0, PAGE_SIZE);
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002308 unlock_page(page);
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002309 }
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002310#ifdef CONFIG_F2FS_FS_COMPRESSION
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002311next_page:
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002312#endif
2313 if (rac)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002314 put_page(page);
Chao Yu4c8ff702019-11-01 18:07:14 +08002315
2316#ifdef CONFIG_F2FS_FS_COMPRESSION
2317 if (f2fs_compressed_file(inode)) {
2318 /* last page */
2319 if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
2320 ret = f2fs_read_multi_pages(&cc, &bio,
2321 max_nr_pages,
2322 &last_block_in_bio,
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002323 rac != NULL, false);
Chao Yu4c8ff702019-11-01 18:07:14 +08002324 f2fs_destroy_compress_ctx(&cc);
2325 }
2326 }
2327#endif
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002328 }
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002329 if (bio)
Linus Torvalds4fc29c12016-07-27 10:36:31 -07002330 __submit_bio(F2FS_I_SB(inode), bio, DATA);
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002331 return ret;
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002332}
2333
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002334static int f2fs_read_data_page(struct file *file, struct page *page)
2335{
Jaegeuk Kim4969c062019-07-01 19:15:29 -07002336 struct inode *inode = page_file_mapping(page)->host;
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07002337 int ret = -EAGAIN;
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002338
Chao Yuc20e89c2014-05-06 16:53:08 +08002339 trace_f2fs_readpage(page, DATA);
2340
Chao Yu4c8ff702019-11-01 18:07:14 +08002341 if (!f2fs_is_compress_backend_ready(inode)) {
2342 unlock_page(page);
2343 return -EOPNOTSUPP;
2344 }
2345
arter97e1c42042014-08-06 23:22:50 +09002346 /* If the file has inline data, try to read it directly */
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002347 if (f2fs_has_inline_data(inode))
2348 ret = f2fs_read_inline_data(inode, page);
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07002349 if (ret == -EAGAIN)
Matthew Wilcox (Oracle)e20a7692020-06-01 21:47:27 -07002350 ret = f2fs_mpage_readpages(inode, NULL, page);
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002351 return ret;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002352}
2353
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002354static void f2fs_readahead(struct readahead_control *rac)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002355{
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002356 struct inode *inode = rac->mapping->host;
Chao Yub8c29402015-10-12 17:02:26 +08002357
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002358 trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002359
Chao Yu4c8ff702019-11-01 18:07:14 +08002360 if (!f2fs_is_compress_backend_ready(inode))
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002361 return;
Chao Yu4c8ff702019-11-01 18:07:14 +08002362
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002363 /* If the file has inline data, skip readpages */
2364 if (f2fs_has_inline_data(inode))
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002365 return;
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002366
Matthew Wilcox (Oracle)e20a7692020-06-01 21:47:27 -07002367 f2fs_mpage_readpages(inode, rac, NULL);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002368}
2369
Chao Yu4c8ff702019-11-01 18:07:14 +08002370int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002371{
2372 struct inode *inode = fio->page->mapping->host;
Chao Yu4c8ff702019-11-01 18:07:14 +08002373 struct page *mpage, *page;
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002374 gfp_t gfp_flags = GFP_NOFS;
2375
Jaegeuk Kim19585932017-09-05 16:54:24 -07002376 if (!f2fs_encrypted_file(inode))
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002377 return 0;
2378
Chao Yu4c8ff702019-11-01 18:07:14 +08002379 page = fio->compressed_page ? fio->compressed_page : fio->page;
2380
Eric Biggers6dbb1792018-04-18 11:09:48 -07002381 /* wait for GCed page writeback via META_MAPPING */
Jaegeuk Kim0ded69f2018-08-22 21:18:00 -07002382 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002383
Satya Tangirala27aacd22020-07-02 01:56:06 +00002384 if (fscrypt_inode_uses_inline_crypto(inode))
2385 return 0;
2386
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002387retry_encrypt:
Chao Yu4c8ff702019-11-01 18:07:14 +08002388 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2389 PAGE_SIZE, 0, gfp_flags);
Chao Yu6aa58d82018-08-14 22:37:25 +08002390 if (IS_ERR(fio->encrypted_page)) {
2391 /* flush pending IOs and wait for a while in the ENOMEM case */
2392 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
2393 f2fs_flush_merged_writes(fio->sbi);
Chao Yu5df7731f2020-02-17 17:45:44 +08002394 congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
Chao Yu6aa58d82018-08-14 22:37:25 +08002395 gfp_flags |= __GFP_NOFAIL;
2396 goto retry_encrypt;
2397 }
2398 return PTR_ERR(fio->encrypted_page);
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002399 }
Chao Yu6aa58d82018-08-14 22:37:25 +08002400
2401 mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
2402 if (mpage) {
2403 if (PageUptodate(mpage))
2404 memcpy(page_address(mpage),
2405 page_address(fio->encrypted_page), PAGE_SIZE);
2406 f2fs_put_page(mpage, 1);
2407 }
2408 return 0;
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002409}
2410
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002411static inline bool check_inplace_update_policy(struct inode *inode,
2412 struct f2fs_io_info *fio)
2413{
2414 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2415 unsigned int policy = SM_I(sbi)->ipu_policy;
2416
2417 if (policy & (0x1 << F2FS_IPU_FORCE))
2418 return true;
Chao Yu4d57b862018-05-30 00:20:41 +08002419 if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002420 return true;
2421 if (policy & (0x1 << F2FS_IPU_UTIL) &&
2422 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2423 return true;
Chao Yu4d57b862018-05-30 00:20:41 +08002424 if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002425 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2426 return true;
2427
2428 /*
2429 * IPU for rewriting async pages
2430 */
2431 if (policy & (0x1 << F2FS_IPU_ASYNC) &&
2432 fio && fio->op == REQ_OP_WRITE &&
2433 !(fio->op_flags & REQ_SYNC) &&
Chandan Rajendra62230e0d2018-12-12 15:20:11 +05302434 !IS_ENCRYPTED(inode))
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002435 return true;
2436
2437 /* this is only set during fdatasync */
2438 if (policy & (0x1 << F2FS_IPU_FSYNC) &&
2439 is_inode_flag_set(inode, FI_NEED_IPU))
2440 return true;
2441
Daniel Rosenberg43549942018-08-20 19:21:43 -07002442 if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2443 !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2444 return true;
2445
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002446 return false;
2447}
2448
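/*
 * Example (illustrative): ipu_policy is a bitmask, so a value of
 * ((0x1 << F2FS_IPU_SSR) | (0x1 << F2FS_IPU_FSYNC)) allows in-place
 * updates both when SSR allocation is needed and for fdatasync writes,
 * while leaving the other triggers disabled.
 */
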
Chao Yu4d57b862018-05-30 00:20:41 +08002449bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002450{
2451 if (f2fs_is_pinned_file(inode))
2452 return true;
2453
2454 /* if this is a cold file, we should overwrite to avoid fragmentation */
2455 if (file_is_cold(inode))
2456 return true;
2457
2458 return check_inplace_update_policy(inode, fio);
2459}
2460
Chao Yu4d57b862018-05-30 00:20:41 +08002461bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002462{
2463 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2464
Chao Yub0332a02020-02-14 17:44:12 +08002465 if (f2fs_lfs_mode(sbi))
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002466 return true;
2467 if (S_ISDIR(inode->i_mode))
2468 return true;
Chao Yuaf033b22018-09-20 20:05:00 +08002469 if (IS_NOQUOTA(inode))
2470 return true;
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002471 if (f2fs_is_atomic_file(inode))
2472 return true;
2473 if (fio) {
2474 if (is_cold_data(fio->page))
2475 return true;
2476 if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
2477 return true;
Daniel Rosenberg43549942018-08-20 19:21:43 -07002478 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2479 f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2480 return true;
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002481 }
2482 return false;
2483}
2484
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002485static inline bool need_inplace_update(struct f2fs_io_info *fio)
2486{
2487 struct inode *inode = fio->page->mapping->host;
2488
Chao Yu4d57b862018-05-30 00:20:41 +08002489 if (f2fs_should_update_outplace(inode, fio))
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002490 return false;
2491
Chao Yu4d57b862018-05-30 00:20:41 +08002492 return f2fs_should_update_inplace(inode, fio);
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002493}
2494
Chao Yu4d57b862018-05-30 00:20:41 +08002495int f2fs_do_write_data_page(struct f2fs_io_info *fio)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002496{
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002497 struct page *page = fio->page;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002498 struct inode *inode = page->mapping->host;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002499 struct dnode_of_data dn;
Hou Pengyange959c8f2017-04-25 12:45:13 +00002500 struct extent_info ei = {0,0,0};
Chao Yu77357302018-07-17 00:02:17 +08002501 struct node_info ni;
Hou Pengyange959c8f2017-04-25 12:45:13 +00002502 bool ipu_force = false;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002503 int err = 0;
2504
2505 set_new_dnode(&dn, inode, NULL, NULL, 0);
Hou Pengyange959c8f2017-04-25 12:45:13 +00002506 if (need_inplace_update(fio) &&
2507 f2fs_lookup_extent_cache(inode, page->index, &ei)) {
2508 fio->old_blkaddr = ei.blk + page->index - ei.fofs;
Jaegeuk Kima8177372017-04-24 15:20:16 -07002509
Chao Yuc9b60782018-08-01 19:13:44 +08002510 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
Chao Yu93770ab2019-04-15 15:26:32 +08002511 DATA_GENERIC_ENHANCE))
Chao Yu10f966b2019-06-20 11:36:14 +08002512 return -EFSCORRUPTED;
Chao Yuc9b60782018-08-01 19:13:44 +08002513
2514 ipu_force = true;
2515 fio->need_lock = LOCK_DONE;
2516 goto got_it;
Hou Pengyange959c8f2017-04-25 12:45:13 +00002517 }
Hou Pengyang279d6df2017-04-27 00:17:21 +08002518
Jaegeuk Kimd29460e2017-06-21 17:52:39 -07002519 /* avoid deadlock between page->lock and f2fs_lock_op */
2520 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
2521 return -EAGAIN;
Hou Pengyang279d6df2017-04-27 00:17:21 +08002522
Chao Yu4d57b862018-05-30 00:20:41 +08002523 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002524 if (err)
Hou Pengyang279d6df2017-04-27 00:17:21 +08002525 goto out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002526
Chao Yu28bc1062016-02-06 14:40:34 +08002527 fio->old_blkaddr = dn.data_blkaddr;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002528
2529 /* This page is already truncated */
Chao Yu7a9d7542016-02-22 18:36:38 +08002530 if (fio->old_blkaddr == NULL_ADDR) {
Jaegeuk Kim2bca1e22015-02-25 19:25:01 -08002531 ClearPageUptodate(page);
Chao Yu2baf0782018-07-27 18:15:16 +08002532 clear_cold_data(page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002533 goto out_writepage;
Jaegeuk Kim2bca1e22015-02-25 19:25:01 -08002534 }
Hou Pengyange959c8f2017-04-25 12:45:13 +00002535got_it:
Chao Yuc9b60782018-08-01 19:13:44 +08002536 if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2537 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
Chao Yu93770ab2019-04-15 15:26:32 +08002538 DATA_GENERIC_ENHANCE)) {
Chao Yu10f966b2019-06-20 11:36:14 +08002539 err = -EFSCORRUPTED;
Chao Yuc9b60782018-08-01 19:13:44 +08002540 goto out_writepage;
2541 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002542 /*
2543	 * If the current allocation needs SSR,
2544	 * it is better to do in-place writes for the updated data.
2545 */
Chao Yu93770ab2019-04-15 15:26:32 +08002546 if (ipu_force ||
2547 (__is_valid_data_blkaddr(fio->old_blkaddr) &&
Chao Yu7b525dd2018-05-23 22:25:08 +08002548 need_inplace_update(fio))) {
Chao Yu4c8ff702019-11-01 18:07:14 +08002549 err = f2fs_encrypt_one_page(fio);
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002550 if (err)
2551 goto out_writepage;
2552
2553 set_page_writeback(page);
Jaegeuk Kim17c50032018-04-11 23:09:04 -07002554 ClearPageError(page);
Hou Pengyang279d6df2017-04-27 00:17:21 +08002555 f2fs_put_dnode(&dn);
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002556 if (fio->need_lock == LOCK_REQ)
Hou Pengyang279d6df2017-04-27 00:17:21 +08002557 f2fs_unlock_op(fio->sbi);
Chao Yu4d57b862018-05-30 00:20:41 +08002558 err = f2fs_inplace_write_data(fio);
Chao Yu6492a332019-02-21 20:37:14 +08002559 if (err) {
Satya Tangirala27aacd22020-07-02 01:56:06 +00002560 if (fscrypt_inode_uses_fs_layer_crypto(inode))
Eric Biggersd2d07272019-05-20 09:29:39 -07002561 fscrypt_finalize_bounce_page(&fio->encrypted_page);
Chao Yu6492a332019-02-21 20:37:14 +08002562 if (PageWriteback(page))
2563 end_page_writeback(page);
Chao Yucd23ffa92019-04-15 15:30:53 +08002564 } else {
2565 set_inode_flag(inode, FI_UPDATE_WRITE);
Chao Yu6492a332019-02-21 20:37:14 +08002566 }
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002567 trace_f2fs_do_write_data_page(fio->page, IPU);
Hou Pengyang279d6df2017-04-27 00:17:21 +08002568 return err;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002569 }
Hou Pengyang279d6df2017-04-27 00:17:21 +08002570
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002571 if (fio->need_lock == LOCK_RETRY) {
2572 if (!f2fs_trylock_op(fio->sbi)) {
2573 err = -EAGAIN;
2574 goto out_writepage;
2575 }
2576 fio->need_lock = LOCK_REQ;
2577 }
2578
Chao Yu77357302018-07-17 00:02:17 +08002579 err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
2580 if (err)
2581 goto out_writepage;
2582
2583 fio->version = ni.version;
2584
Chao Yu4c8ff702019-11-01 18:07:14 +08002585 err = f2fs_encrypt_one_page(fio);
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002586 if (err)
2587 goto out_writepage;
2588
2589 set_page_writeback(page);
Jaegeuk Kim17c50032018-04-11 23:09:04 -07002590 ClearPageError(page);
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002591
Chao Yu4c8ff702019-11-01 18:07:14 +08002592 if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
2593 f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
2594
Hou Pengyang279d6df2017-04-27 00:17:21 +08002595 /* LFS mode write path */
Chao Yu4d57b862018-05-30 00:20:41 +08002596 f2fs_outplace_write_data(&dn, fio);
Hou Pengyang279d6df2017-04-27 00:17:21 +08002597 trace_f2fs_do_write_data_page(page, OPU);
2598 set_inode_flag(inode, FI_APPEND_WRITE);
2599 if (page->index == 0)
2600 set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002601out_writepage:
2602 f2fs_put_dnode(&dn);
Hou Pengyang279d6df2017-04-27 00:17:21 +08002603out:
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002604 if (fio->need_lock == LOCK_REQ)
Hou Pengyang279d6df2017-04-27 00:17:21 +08002605 f2fs_unlock_op(fio->sbi);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002606 return err;
2607}
2608
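/*
 * Write back a single page. Returns 0 on success (setting *submitted to
 * 1 if a bio was actually issued), AOP_WRITEPAGE_ACTIVATE when the page
 * should be kept dirty, or a negative error. Dentry and quota pages are
 * written under checkpoint control; regular data is tried as inline
 * data first and may trigger f2fs_balance_fs() afterwards.
 */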
Chao Yu4c8ff702019-11-01 18:07:14 +08002609int f2fs_write_single_data_page(struct page *page, int *submitted,
Chao Yu8648de22019-02-19 16:15:29 +08002610 struct bio **bio,
2611 sector_t *last_block,
Chao Yub0af6d42017-08-02 23:21:48 +08002612 struct writeback_control *wbc,
Chao Yu4c8ff702019-11-01 18:07:14 +08002613 enum iostat_type io_type,
Chao Yu3afae092021-01-11 17:42:53 +08002614 int compr_blocks,
2615 bool allow_balance)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002616{
2617 struct inode *inode = page->mapping->host;
Jaegeuk Kim40813632014-09-02 15:31:18 -07002618 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002619 loff_t i_size = i_size_read(inode);
Chao Yu4c8ff702019-11-01 18:07:14 +08002620 const pgoff_t end_index = ((unsigned long long)i_size)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002621 >> PAGE_SHIFT;
Chao Yu1f0d5c92019-11-07 17:29:00 +08002622 loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002623 unsigned offset = 0;
Jaegeuk Kim39936832012-11-22 16:21:29 +09002624 bool need_balance_fs = false;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002625 int err = 0;
Jaegeuk Kim458e6192013-12-11 13:54:01 +09002626 struct f2fs_io_info fio = {
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002627 .sbi = sbi,
Chao Yu39d787b2017-09-29 13:59:38 +08002628 .ino = inode->i_ino,
Jaegeuk Kim458e6192013-12-11 13:54:01 +09002629 .type = DATA,
Mike Christie04d328d2016-06-05 14:31:55 -05002630 .op = REQ_OP_WRITE,
Jens Axboe76372412016-11-01 10:00:38 -06002631 .op_flags = wbc_to_write_flags(wbc),
Hou Pengyange959c8f2017-04-25 12:45:13 +00002632 .old_blkaddr = NULL_ADDR,
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002633 .page = page,
Jaegeuk Kim4375a332015-04-23 12:04:33 -07002634 .encrypted_page = NULL,
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08002635 .submitted = false,
Chao Yu4c8ff702019-11-01 18:07:14 +08002636 .compr_blocks = compr_blocks,
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002637 .need_lock = LOCK_RETRY,
Chao Yub0af6d42017-08-02 23:21:48 +08002638 .io_type = io_type,
Yufen Yu578c6472018-01-09 19:33:39 +08002639 .io_wbc = wbc,
Chao Yu8648de22019-02-19 16:15:29 +08002640 .bio = bio,
2641 .last_block = last_block,
Jaegeuk Kim458e6192013-12-11 13:54:01 +09002642 };
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002643
Chao Yuecda0de2014-05-06 16:48:26 +08002644 trace_f2fs_writepage(page, DATA);
2645
Chao Yudb198ae2018-01-18 17:29:10 +08002646	/* we should bypass data pages to keep the kworker jobs going */
2647 if (unlikely(f2fs_cp_error(sbi))) {
2648 mapping_set_error(page->mapping, -EIO);
Chao Yu1174abf2018-05-28 16:59:26 +08002649 /*
2650	 * don't drop any dirty dentry pages to keep the latest
2651 * directory structure.
2652 */
2653 if (S_ISDIR(inode->i_mode))
2654 goto redirty_out;
Chao Yudb198ae2018-01-18 17:29:10 +08002655 goto out;
2656 }
2657
Chao Yu0771fcc2017-06-29 23:20:45 +08002658 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2659 goto redirty_out;
2660
Chao Yu4c8ff702019-11-01 18:07:14 +08002661 if (page->index < end_index ||
2662 f2fs_verity_in_progress(inode) ||
2663 compr_blocks)
Jaegeuk Kim39936832012-11-22 16:21:29 +09002664 goto write;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002665
2666 /*
2667	 * If the offset is beyond the end of the file,
2668 * this page does not have to be written to disk.
2669 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002670 offset = i_size & (PAGE_SIZE - 1);
Jaegeuk Kim76f60262014-04-15 16:04:15 +09002671 if ((page->index >= end_index + 1) || !offset)
Jaegeuk Kim39936832012-11-22 16:21:29 +09002672 goto out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002673
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002674 zero_user_segment(page, offset, PAGE_SIZE);
Jaegeuk Kim39936832012-11-22 16:21:29 +09002675write:
Jaegeuk Kim1e843712014-12-09 06:08:59 -08002676 if (f2fs_is_drop_cache(inode))
2677 goto out;
Jaegeuk Kime6e5f562016-04-14 16:48:52 -07002678	/* we should not write the 0'th page, which holds the journal header */
2679 if (f2fs_is_volatile_file(inode) && (!page->index ||
2680 (!wbc->for_reclaim &&
Chao Yu4d57b862018-05-30 00:20:41 +08002681 f2fs_available_free_memory(sbi, BASE_CHECK))))
Jaegeuk Kim1e843712014-12-09 06:08:59 -08002682 goto redirty_out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002683
Jaegeuk Kim435cbab2020-04-09 10:25:21 -07002684 /* Dentry/quota blocks are controlled by checkpoint */
2685 if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
Chao Yu79963d92020-06-18 14:36:23 +08002686 /*
2687		 * We need to wait for node_write to avoid block allocation during
2688		 * checkpoint. This can only happen to quota writes, which could
2689		 * otherwise race with discard.
2690 */
2691 if (IS_NOQUOTA(inode))
2692 down_read(&sbi->node_write);
2693
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002694 fio.need_lock = LOCK_DONE;
Chao Yu4d57b862018-05-30 00:20:41 +08002695 err = f2fs_do_write_data_page(&fio);
Chao Yu79963d92020-06-18 14:36:23 +08002696
2697 if (IS_NOQUOTA(inode))
2698 up_read(&sbi->node_write);
2699
Jaegeuk Kimb230e6c2016-05-29 21:18:23 -07002700 goto done;
2701 }
2702
Jaegeuk Kim8618b882014-02-17 19:29:27 +09002703 if (!wbc->for_reclaim)
2704 need_balance_fs = true;
Jaegeuk Kim7f3037a2016-09-01 12:02:51 -07002705 else if (has_not_enough_free_secs(sbi, 0, 0))
Jaegeuk Kim39936832012-11-22 16:21:29 +09002706 goto redirty_out;
Jaegeuk Kimef095d12017-03-24 20:05:13 -04002707 else
2708 set_inode_flag(inode, FI_HOT_DATA);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002709
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07002710 err = -EAGAIN;
Yunlei Hedd7b2332017-02-23 20:31:20 +08002711 if (f2fs_has_inline_data(inode)) {
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07002712 err = f2fs_write_inline_data(inode, page);
Yunlei Hedd7b2332017-02-23 20:31:20 +08002713 if (!err)
2714 goto out;
2715 }
Hou Pengyang279d6df2017-04-27 00:17:21 +08002716
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002717 if (err == -EAGAIN) {
Chao Yu4d57b862018-05-30 00:20:41 +08002718 err = f2fs_do_write_data_page(&fio);
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002719 if (err == -EAGAIN) {
2720 fio.need_lock = LOCK_REQ;
Chao Yu4d57b862018-05-30 00:20:41 +08002721 err = f2fs_do_write_data_page(&fio);
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002722 }
2723 }
Chao Yua0d00fa2017-10-09 17:55:19 +08002724
Chao Yueb449792018-01-17 16:31:37 +08002725 if (err) {
2726 file_set_keep_isize(inode);
2727 } else {
Chao Yuc10c9822020-02-27 19:30:03 +08002728 spin_lock(&F2FS_I(inode)->i_size_lock);
Chao Yueb449792018-01-17 16:31:37 +08002729 if (F2FS_I(inode)->last_disk_size < psize)
2730 F2FS_I(inode)->last_disk_size = psize;
Chao Yuc10c9822020-02-27 19:30:03 +08002731 spin_unlock(&F2FS_I(inode)->i_size_lock);
Chao Yueb449792018-01-17 16:31:37 +08002732 }
Hou Pengyang279d6df2017-04-27 00:17:21 +08002733
Jaegeuk Kim8618b882014-02-17 19:29:27 +09002734done:
2735 if (err && err != -ENOENT)
2736 goto redirty_out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002737
Jaegeuk Kim39936832012-11-22 16:21:29 +09002738out:
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07002739 inode_dec_dirty_pages(inode);
Chao Yu2baf0782018-07-27 18:15:16 +08002740 if (err) {
Jaegeuk Kim2bca1e22015-02-25 19:25:01 -08002741 ClearPageUptodate(page);
Chao Yu2baf0782018-07-27 18:15:16 +08002742 clear_cold_data(page);
2743 }
Chao Yu0c3a5792016-01-18 18:28:11 +08002744
2745 if (wbc->for_reclaim) {
Chao Yubab475c2018-09-27 23:41:16 +08002746 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
Jaegeuk Kimef095d12017-03-24 20:05:13 -04002747 clear_inode_flag(inode, FI_HOT_DATA);
Chao Yu4d57b862018-05-30 00:20:41 +08002748 f2fs_remove_dirty_inode(inode);
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08002749 submitted = NULL;
Chao Yueb7e8132015-11-10 18:45:07 +08002750 }
Chao Yu0c3a5792016-01-18 18:28:11 +08002751 unlock_page(page);
Chao Yu186857c2019-04-02 18:52:19 +08002752 if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
Chao Yu3afae092021-01-11 17:42:53 +08002753 !F2FS_I(inode)->cp_task && allow_balance)
Jaegeuk Kima7881892017-04-20 13:51:57 -07002754 f2fs_balance_fs(sbi, need_balance_fs);
Chao Yu0c3a5792016-01-18 18:28:11 +08002755
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08002756 if (unlikely(f2fs_cp_error(sbi))) {
Jaegeuk Kimb9109b02017-05-10 11:28:38 -07002757 f2fs_submit_merged_write(sbi, DATA);
Chao Yu0b20fce2019-09-30 18:53:25 +08002758 f2fs_submit_merged_ipu_write(sbi, bio, NULL);
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08002759 submitted = NULL;
2760 }
2761
2762 if (submitted)
Chao Yu4c8ff702019-11-01 18:07:14 +08002763 *submitted = fio.submitted ? 1 : 0;
Chao Yu0c3a5792016-01-18 18:28:11 +08002764
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002765 return 0;
2766
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002767redirty_out:
Jaegeuk Kim76f60262014-04-15 16:04:15 +09002768 redirty_page_for_writepage(wbc, page);
Jaegeuk Kim5b19d282018-05-03 23:26:02 -07002769 /*
2770	 * pageout() in MM translates EAGAIN, so it calls handle_write_error()
2771	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
2772	 * file_write_and_wait_range() will then see the EIO error, which is
2773	 * critical for fsync() to report the atomic_write failure to the user.
2774 */
2775 if (!err || wbc->for_reclaim)
Chao Yu0002b612016-11-28 19:13:43 -08002776 return AOP_WRITEPAGE_ACTIVATE;
Jaegeuk Kimb230e6c2016-05-29 21:18:23 -07002777 unlock_page(page);
2778 return err;
Namjae Jeonfa9150a2013-01-15 16:45:24 +09002779}
2780
Jaegeuk Kimf566bae2017-02-03 17:18:00 -08002781static int f2fs_write_data_page(struct page *page,
2782 struct writeback_control *wbc)
2783{
Chao Yu4c8ff702019-11-01 18:07:14 +08002784#ifdef CONFIG_F2FS_FS_COMPRESSION
2785 struct inode *inode = page->mapping->host;
2786
2787 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
2788 goto out;
2789
2790 if (f2fs_compressed_file(inode)) {
2791 if (f2fs_is_compressed_cluster(inode, page->index)) {
2792 redirty_page_for_writepage(wbc, page);
2793 return AOP_WRITEPAGE_ACTIVATE;
2794 }
2795 }
2796out:
2797#endif
2798
2799 return f2fs_write_single_data_page(page, NULL, NULL, NULL,
Chao Yu3afae092021-01-11 17:42:53 +08002800 wbc, FS_DATA_IO, 0, true);
Jaegeuk Kimf566bae2017-02-03 17:18:00 -08002801}
2802
Chao Yu8f46dca2015-07-14 18:56:10 +08002803/*
2804	 * This function was copied from write_cache_pages in mm/page-writeback.c.
2805	 * The major change is that it writes cold data pages separately from
2806	 * warm/hot data pages.
2807 */
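/*
 * Note: with CONFIG_F2FS_FS_COMPRESSION, pages of a compressed file are
 * first collected into a compress context (cc) and flushed through
 * f2fs_write_multi_pages() whenever the next page cannot be merged into
 * the current cluster, and once more after the scan loop for the
 * remaining pages of the last cluster.
 */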
2808static int f2fs_write_cache_pages(struct address_space *mapping,
Chao Yub0af6d42017-08-02 23:21:48 +08002809 struct writeback_control *wbc,
2810 enum iostat_type io_type)
Chao Yu8f46dca2015-07-14 18:56:10 +08002811{
2812 int ret = 0;
Chao Yu4c8ff702019-11-01 18:07:14 +08002813 int done = 0, retry = 0;
Chao Yu8f46dca2015-07-14 18:56:10 +08002814 struct pagevec pvec;
Chao Yuc29fd0c2018-06-04 23:20:36 +08002815 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
Chao Yu8648de22019-02-19 16:15:29 +08002816 struct bio *bio = NULL;
2817 sector_t last_block;
Chao Yu4c8ff702019-11-01 18:07:14 +08002818#ifdef CONFIG_F2FS_FS_COMPRESSION
2819 struct inode *inode = mapping->host;
2820 struct compress_ctx cc = {
2821 .inode = inode,
2822 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2823 .cluster_size = F2FS_I(inode)->i_cluster_size,
2824 .cluster_idx = NULL_CLUSTER,
2825 .rpages = NULL,
2826 .nr_rpages = 0,
2827 .cpages = NULL,
2828 .rbuf = NULL,
2829 .cbuf = NULL,
2830 .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
2831 .private = NULL,
2832 };
2833#endif
Chao Yu8f46dca2015-07-14 18:56:10 +08002834 int nr_pages;
Chao Yu8f46dca2015-07-14 18:56:10 +08002835 pgoff_t index;
2836 pgoff_t end; /* Inclusive */
2837 pgoff_t done_index;
Chao Yu8f46dca2015-07-14 18:56:10 +08002838 int range_whole = 0;
Matthew Wilcox10bbd232017-12-05 17:30:38 -05002839 xa_mark_t tag;
Chao Yubab475c2018-09-27 23:41:16 +08002840 int nwritten = 0;
Chao Yu4c8ff702019-11-01 18:07:14 +08002841 int submitted = 0;
2842 int i;
Chao Yu8f46dca2015-07-14 18:56:10 +08002843
Mel Gorman86679822017-11-15 17:37:52 -08002844 pagevec_init(&pvec);
Jaegeuk Kim46ae9572016-05-25 20:57:16 -07002845
Jaegeuk Kimef095d12017-03-24 20:05:13 -04002846 if (get_dirty_pages(mapping->host) <=
2847 SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
2848 set_inode_flag(mapping->host, FI_HOT_DATA);
2849 else
2850 clear_inode_flag(mapping->host, FI_HOT_DATA);
2851
Chao Yu8f46dca2015-07-14 18:56:10 +08002852 if (wbc->range_cyclic) {
Jason Yan4df7a75f2020-06-15 16:51:32 +08002853 index = mapping->writeback_index; /* prev offset */
Chao Yu8f46dca2015-07-14 18:56:10 +08002854 end = -1;
2855 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002856 index = wbc->range_start >> PAGE_SHIFT;
2857 end = wbc->range_end >> PAGE_SHIFT;
Chao Yu8f46dca2015-07-14 18:56:10 +08002858 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2859 range_whole = 1;
Chao Yu8f46dca2015-07-14 18:56:10 +08002860 }
2861 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2862 tag = PAGECACHE_TAG_TOWRITE;
2863 else
2864 tag = PAGECACHE_TAG_DIRTY;
2865retry:
Chao Yu4c8ff702019-11-01 18:07:14 +08002866 retry = 0;
Chao Yu8f46dca2015-07-14 18:56:10 +08002867 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2868 tag_pages_for_writeback(mapping, index, end);
2869 done_index = index;
Chao Yu4c8ff702019-11-01 18:07:14 +08002870 while (!done && !retry && (index <= end)) {
Jan Kara69c4f352017-11-15 17:34:48 -08002871 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
Jan Kara67fd7072017-11-15 17:35:19 -08002872 tag);
Chao Yu8f46dca2015-07-14 18:56:10 +08002873 if (nr_pages == 0)
2874 break;
2875
2876 for (i = 0; i < nr_pages; i++) {
2877 struct page *page = pvec.pages[i];
Chao Yu4c8ff702019-11-01 18:07:14 +08002878 bool need_readd;
2879readd:
2880 need_readd = false;
2881#ifdef CONFIG_F2FS_FS_COMPRESSION
2882 if (f2fs_compressed_file(inode)) {
2883 ret = f2fs_init_compress_ctx(&cc);
2884 if (ret) {
2885 done = 1;
2886 break;
2887 }
Chao Yu8f46dca2015-07-14 18:56:10 +08002888
Chao Yu4c8ff702019-11-01 18:07:14 +08002889 if (!f2fs_cluster_can_merge_page(&cc,
2890 page->index)) {
2891 ret = f2fs_write_multi_pages(&cc,
2892 &submitted, wbc, io_type);
2893 if (!ret)
2894 need_readd = true;
2895 goto result;
2896 }
2897
2898 if (unlikely(f2fs_cp_error(sbi)))
2899 goto lock_page;
2900
2901 if (f2fs_cluster_is_empty(&cc)) {
2902 void *fsdata = NULL;
2903 struct page *pagep;
2904 int ret2;
2905
2906 ret2 = f2fs_prepare_compress_overwrite(
2907 inode, &pagep,
2908 page->index, &fsdata);
2909 if (ret2 < 0) {
2910 ret = ret2;
2911 done = 1;
2912 break;
2913 } else if (ret2 &&
2914 !f2fs_compress_write_end(inode,
2915 fsdata, page->index,
2916 1)) {
2917 retry = 1;
2918 break;
2919 }
2920 } else {
2921 goto lock_page;
2922 }
2923 }
2924#endif
Chao Yuf8de4332018-05-23 22:25:09 +08002925			/* give priority to WB_SYNC threads */
Chao Yuc29fd0c2018-06-04 23:20:36 +08002926 if (atomic_read(&sbi->wb_sync_req[DATA]) &&
Chao Yuf8de4332018-05-23 22:25:09 +08002927 wbc->sync_mode == WB_SYNC_NONE) {
2928 done = 1;
2929 break;
2930 }
Chao Yu4c8ff702019-11-01 18:07:14 +08002931#ifdef CONFIG_F2FS_FS_COMPRESSION
2932lock_page:
2933#endif
Chao Yu8f46dca2015-07-14 18:56:10 +08002934 done_index = page->index;
Jaegeuk Kimd29460e2017-06-21 17:52:39 -07002935retry_write:
Chao Yu8f46dca2015-07-14 18:56:10 +08002936 lock_page(page);
2937
2938 if (unlikely(page->mapping != mapping)) {
2939continue_unlock:
2940 unlock_page(page);
2941 continue;
2942 }
2943
2944 if (!PageDirty(page)) {
2945 /* someone wrote it for us */
2946 goto continue_unlock;
2947 }
2948
Chao Yu8f46dca2015-07-14 18:56:10 +08002949 if (PageWriteback(page)) {
Chao Yu0b20fce2019-09-30 18:53:25 +08002950 if (wbc->sync_mode != WB_SYNC_NONE)
Jaegeuk Kimfec1d652016-01-20 23:43:51 +08002951 f2fs_wait_on_page_writeback(page,
Chao Yubae0ee72018-12-25 17:43:42 +08002952 DATA, true, true);
Chao Yu0b20fce2019-09-30 18:53:25 +08002953 else
Chao Yu8f46dca2015-07-14 18:56:10 +08002954 goto continue_unlock;
2955 }
2956
Chao Yu8f46dca2015-07-14 18:56:10 +08002957 if (!clear_page_dirty_for_io(page))
2958 goto continue_unlock;
2959
Chao Yu4c8ff702019-11-01 18:07:14 +08002960#ifdef CONFIG_F2FS_FS_COMPRESSION
2961 if (f2fs_compressed_file(inode)) {
2962 get_page(page);
2963 f2fs_compress_ctx_add_page(&cc, page);
2964 continue;
2965 }
2966#endif
2967 ret = f2fs_write_single_data_page(page, &submitted,
Chao Yu3afae092021-01-11 17:42:53 +08002968 &bio, &last_block, wbc, io_type,
2969 0, true);
Chao Yu4c8ff702019-11-01 18:07:14 +08002970 if (ret == AOP_WRITEPAGE_ACTIVATE)
2971 unlock_page(page);
2972#ifdef CONFIG_F2FS_FS_COMPRESSION
2973result:
2974#endif
2975 nwritten += submitted;
2976 wbc->nr_to_write -= submitted;
2977
Chao Yu8f46dca2015-07-14 18:56:10 +08002978 if (unlikely(ret)) {
Chao Yu0002b612016-11-28 19:13:43 -08002979 /*
2980 * keep nr_to_write, since vfs uses this to
2981 * get # of written pages.
2982 */
2983 if (ret == AOP_WRITEPAGE_ACTIVATE) {
Chao Yu0002b612016-11-28 19:13:43 -08002984 ret = 0;
Chao Yu4c8ff702019-11-01 18:07:14 +08002985 goto next;
Jaegeuk Kimd29460e2017-06-21 17:52:39 -07002986 } else if (ret == -EAGAIN) {
2987 ret = 0;
2988 if (wbc->sync_mode == WB_SYNC_ALL) {
2989 cond_resched();
2990 congestion_wait(BLK_RW_ASYNC,
Chao Yu5df7731f2020-02-17 17:45:44 +08002991 DEFAULT_IO_TIMEOUT);
Jaegeuk Kimd29460e2017-06-21 17:52:39 -07002992 goto retry_write;
2993 }
Chao Yu4c8ff702019-11-01 18:07:14 +08002994 goto next;
Chao Yu0002b612016-11-28 19:13:43 -08002995 }
Jaegeuk Kimb230e6c2016-05-29 21:18:23 -07002996 done_index = page->index + 1;
2997 done = 1;
2998 break;
Chao Yu8f46dca2015-07-14 18:56:10 +08002999 }
3000
Chao Yu4c8ff702019-11-01 18:07:14 +08003001 if (wbc->nr_to_write <= 0 &&
Jaegeuk Kim687de7f2017-03-28 18:07:38 -07003002 wbc->sync_mode == WB_SYNC_NONE) {
Chao Yu8f46dca2015-07-14 18:56:10 +08003003 done = 1;
3004 break;
3005 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003006next:
3007 if (need_readd)
3008 goto readd;
Chao Yu8f46dca2015-07-14 18:56:10 +08003009 }
3010 pagevec_release(&pvec);
3011 cond_resched();
3012 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003013#ifdef CONFIG_F2FS_FS_COMPRESSION
3014 /* flush remained pages in compress cluster */
3015 if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
3016 ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
3017 nwritten += submitted;
3018 wbc->nr_to_write -= submitted;
3019 if (ret) {
3020 done = 1;
3021 retry = 0;
3022 }
3023 }
Jaegeuk Kimadfc6942020-09-23 00:54:50 -07003024 if (f2fs_compressed_file(inode))
3025 f2fs_destroy_compress_ctx(&cc);
Chao Yu4c8ff702019-11-01 18:07:14 +08003026#endif
Sahitya Tummalae78790f2020-06-02 18:11:47 +05303027 if (retry) {
Chao Yu8f46dca2015-07-14 18:56:10 +08003028 index = 0;
Sahitya Tummalae78790f2020-06-02 18:11:47 +05303029 end = -1;
Chao Yu8f46dca2015-07-14 18:56:10 +08003030 goto retry;
3031 }
Sahitya Tummalae78790f2020-06-02 18:11:47 +05303032 if (wbc->range_cyclic && !done)
3033 done_index = 0;
Chao Yu8f46dca2015-07-14 18:56:10 +08003034 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3035 mapping->writeback_index = done_index;
3036
Chao Yubab475c2018-09-27 23:41:16 +08003037 if (nwritten)
Jaegeuk Kimb9109b02017-05-10 11:28:38 -07003038 f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
Chao Yubab475c2018-09-27 23:41:16 +08003039 NULL, 0, DATA);
Chao Yu8648de22019-02-19 16:15:29 +08003040 /* submit cached bio of IPU write */
3041 if (bio)
Chao Yu0b20fce2019-09-30 18:53:25 +08003042 f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
Chao Yu6ca56ca2016-09-29 18:50:11 +08003043
Chao Yu8f46dca2015-07-14 18:56:10 +08003044 return ret;
3045}
3046
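/*
 * Decide whether writeback of this inode should be serialized through
 * sbi->writepages: only regular, non-quota files qualify, and only when
 * the data is compressed, the writeback is not WB_SYNC_ALL, or the
 * inode has at least min_seq_blocks dirty pages. Writeback issued by
 * the checkpoint task (cp_task) is excluded to avoid deadlock.
 */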
Jaegeuk Kim853137c2018-08-09 17:53:34 -07003047static inline bool __should_serialize_io(struct inode *inode,
3048 struct writeback_control *wbc)
3049{
Chao Yu040d2bb2019-05-20 17:36:59 +08003050	/* to avoid deadlock in the data flush path */
3051 if (F2FS_I(inode)->cp_task)
3052 return false;
Chao Yub13f67ff2020-03-19 19:57:57 +08003053
3054 if (!S_ISREG(inode->i_mode))
3055 return false;
3056 if (IS_NOQUOTA(inode))
3057 return false;
3058
Daeho Jeong602a16d2020-12-01 13:08:02 +09003059 if (f2fs_need_compress_data(inode))
Chao Yub13f67ff2020-03-19 19:57:57 +08003060 return true;
Jaegeuk Kim853137c2018-08-09 17:53:34 -07003061 if (wbc->sync_mode != WB_SYNC_ALL)
3062 return true;
3063 if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3064 return true;
3065 return false;
3066}
3067
Chao Yufc99fe22018-05-30 00:20:39 +08003068static int __f2fs_write_data_pages(struct address_space *mapping,
Chao Yub0af6d42017-08-02 23:21:48 +08003069 struct writeback_control *wbc,
3070 enum iostat_type io_type)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003071{
3072 struct inode *inode = mapping->host;
Jaegeuk Kim40813632014-09-02 15:31:18 -07003073 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kim9dfa1ba2016-07-13 19:33:19 -07003074 struct blk_plug plug;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003075 int ret;
Jaegeuk Kim853137c2018-08-09 17:53:34 -07003076 bool locked = false;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003077
P J Pcfb185a2013-04-03 11:38:00 +09003078	/* deal with chardevs and other special files */
3079 if (!mapping->a_ops->writepage)
3080 return 0;
3081
Chao Yu6a290542015-07-17 18:02:39 +08003082 /* skip writing if there is no dirty page in this inode */
3083 if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
3084 return 0;
3085
Chao Yu0771fcc2017-06-29 23:20:45 +08003086 /* during POR, we don't need to trigger writepage at all. */
3087 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
3088 goto skip_write;
3089
Chao Yuaf033b22018-09-20 20:05:00 +08003090 if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
3091 wbc->sync_mode == WB_SYNC_NONE &&
Jaegeuk Kima1257022015-10-08 10:40:07 -07003092 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
Chao Yu4d57b862018-05-30 00:20:41 +08003093 f2fs_available_free_memory(sbi, DIRTY_DENTS))
Jaegeuk Kima1257022015-10-08 10:40:07 -07003094 goto skip_write;
3095
Chao Yud323d002015-10-27 09:53:45 +08003096	/* skip writing during file defragmentation */
Jaegeuk Kim91942322016-05-20 10:13:22 -07003097 if (is_inode_flag_set(inode, FI_DO_DEFRAG))
Chao Yud323d002015-10-27 09:53:45 +08003098 goto skip_write;
3099
Yunlei Hed31c7c32016-02-04 16:14:00 +08003100 trace_f2fs_writepages(mapping->host, wbc, DATA);
3101
Jaegeuk Kim687de7f2017-03-28 18:07:38 -07003102	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
3103 if (wbc->sync_mode == WB_SYNC_ALL)
Chao Yuc29fd0c2018-06-04 23:20:36 +08003104 atomic_inc(&sbi->wb_sync_req[DATA]);
3105 else if (atomic_read(&sbi->wb_sync_req[DATA]))
Jaegeuk Kim687de7f2017-03-28 18:07:38 -07003106 goto skip_write;
3107
Jaegeuk Kim853137c2018-08-09 17:53:34 -07003108 if (__should_serialize_io(inode, wbc)) {
3109 mutex_lock(&sbi->writepages);
3110 locked = true;
3111 }
3112
Jaegeuk Kim9dfa1ba2016-07-13 19:33:19 -07003113 blk_start_plug(&plug);
Chao Yub0af6d42017-08-02 23:21:48 +08003114 ret = f2fs_write_cache_pages(mapping, wbc, io_type);
Jaegeuk Kim9dfa1ba2016-07-13 19:33:19 -07003115 blk_finish_plug(&plug);
Jaegeuk Kim687de7f2017-03-28 18:07:38 -07003116
Jaegeuk Kim853137c2018-08-09 17:53:34 -07003117 if (locked)
3118 mutex_unlock(&sbi->writepages);
3119
Jaegeuk Kim687de7f2017-03-28 18:07:38 -07003120 if (wbc->sync_mode == WB_SYNC_ALL)
Chao Yuc29fd0c2018-06-04 23:20:36 +08003121 atomic_dec(&sbi->wb_sync_req[DATA]);
Jaegeuk Kim28ea6162016-05-25 17:17:56 -07003122 /*
3123	 * if some pages were truncated, we cannot guarantee that
3124	 * mapping->host can be used to detect pending bios.
3125 */
Jaegeuk Kim458e6192013-12-11 13:54:01 +09003126
Chao Yu4d57b862018-05-30 00:20:41 +08003127 f2fs_remove_dirty_inode(inode);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003128 return ret;
Jaegeuk Kimd3baf952014-03-18 13:43:05 +09003129
3130skip_write:
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07003131 wbc->pages_skipped += get_dirty_pages(inode);
Yunlei Hed31c7c32016-02-04 16:14:00 +08003132 trace_f2fs_writepages(mapping->host, wbc, DATA);
Jaegeuk Kimd3baf952014-03-18 13:43:05 +09003133 return 0;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003134}
3135
Chao Yub0af6d42017-08-02 23:21:48 +08003136static int f2fs_write_data_pages(struct address_space *mapping,
3137 struct writeback_control *wbc)
3138{
3139 struct inode *inode = mapping->host;
3140
3141 return __f2fs_write_data_pages(mapping, wbc,
3142 F2FS_I(inode)->cp_task == current ?
3143 FS_CP_DATA_IO : FS_DATA_IO);
3144}
3145
Chao Yu3aab8f82014-07-02 13:25:04 +08003146static void f2fs_write_failed(struct address_space *mapping, loff_t to)
3147{
3148 struct inode *inode = mapping->host;
Jaegeuk Kim819d9152015-12-28 13:48:11 -08003149 loff_t i_size = i_size_read(inode);
Chao Yu3aab8f82014-07-02 13:25:04 +08003150
Jaegeuk Kim3f188c22019-12-03 18:54:29 -08003151 if (IS_NOQUOTA(inode))
3152 return;
3153
Eric Biggers95ae2512019-07-22 09:26:24 -07003154 /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
3155 if (to > i_size && !f2fs_verity_in_progress(inode)) {
Chao Yua33c1502018-08-05 23:04:25 +08003156 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09003157 down_write(&F2FS_I(inode)->i_mmap_sem);
Chao Yua33c1502018-08-05 23:04:25 +08003158
Jaegeuk Kim819d9152015-12-28 13:48:11 -08003159 truncate_pagecache(inode, i_size);
Jaegeuk Kim3f188c22019-12-03 18:54:29 -08003160 f2fs_truncate_blocks(inode, i_size, true);
Chao Yua33c1502018-08-05 23:04:25 +08003161
Qiuyang Sun5a3a2d82017-05-18 11:06:45 +08003162 up_write(&F2FS_I(inode)->i_mmap_sem);
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09003163 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Chao Yu3aab8f82014-07-02 13:25:04 +08003164 }
3165}
3166
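/*
 * Resolve the block address backing the page for write_begin: read it
 * from inline data, the extent cache, or a dnode lookup, converting
 * inline data and allocating a new block under f2fs_do_map_lock() when
 * needed. *node_changed reports whether node pages were dirtied, so the
 * caller knows to call f2fs_balance_fs().
 */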
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003167static int prepare_write_begin(struct f2fs_sb_info *sbi,
3168 struct page *page, loff_t pos, unsigned len,
3169 block_t *blk_addr, bool *node_changed)
3170{
3171 struct inode *inode = page->mapping->host;
3172 pgoff_t index = page->index;
3173 struct dnode_of_data dn;
3174 struct page *ipage;
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003175 bool locked = false;
Hou Pengyange15882b2017-02-23 09:18:05 +00003176 struct extent_info ei = {0,0,0};
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003177 int err = 0;
Sheng Yong2866fb12018-11-14 19:34:28 +08003178 int flag;
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003179
Jaegeuk Kim24b84912016-02-03 13:49:44 -08003180 /*
3181 * we already allocated all the blocks, so we don't need to get
3182 * the block addresses when there is no need to fill the page.
3183 */
Jaegeuk Kimdc91de72017-01-13 13:12:29 -08003184 if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
Eric Biggers95ae2512019-07-22 09:26:24 -07003185 !is_inode_flag_set(inode, FI_NO_PREALLOC) &&
3186 !f2fs_verity_in_progress(inode))
Jaegeuk Kim24b84912016-02-03 13:49:44 -08003187 return 0;
3188
Sheng Yong2866fb12018-11-14 19:34:28 +08003189 /* f2fs_lock_op avoids race between write CP and convert_inline_page */
3190 if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
3191 flag = F2FS_GET_BLOCK_DEFAULT;
3192 else
3193 flag = F2FS_GET_BLOCK_PRE_AIO;
3194
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003195 if (f2fs_has_inline_data(inode) ||
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003196 (pos & PAGE_MASK) >= i_size_read(inode)) {
Chao Yu0ef81832020-06-18 14:36:22 +08003197 f2fs_do_map_lock(sbi, flag, true);
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003198 locked = true;
3199 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003200
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003201restart:
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003202 /* check inline_data */
Chao Yu4d57b862018-05-30 00:20:41 +08003203 ipage = f2fs_get_node_page(sbi, inode->i_ino);
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003204 if (IS_ERR(ipage)) {
3205 err = PTR_ERR(ipage);
3206 goto unlock_out;
3207 }
3208
3209 set_new_dnode(&dn, inode, ipage, ipage, 0);
3210
3211 if (f2fs_has_inline_data(inode)) {
Chao Yuf2470372017-07-19 00:19:05 +08003212 if (pos + len <= MAX_INLINE_DATA(inode)) {
Chao Yu4d57b862018-05-30 00:20:41 +08003213 f2fs_do_read_inline_data(page, ipage);
Jaegeuk Kim91942322016-05-20 10:13:22 -07003214 set_inode_flag(inode, FI_DATA_EXIST);
Chao Yuab470362016-05-11 19:48:44 +08003215 if (inode->i_nlink)
3216 set_inline_node(ipage);
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003217 } else {
3218 err = f2fs_convert_inline_page(&dn, page);
3219 if (err)
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003220 goto out;
3221 if (dn.data_blkaddr == NULL_ADDR)
3222 err = f2fs_get_block(&dn, index);
3223 }
3224 } else if (locked) {
3225 err = f2fs_get_block(&dn, index);
3226 } else {
3227 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
3228 dn.data_blkaddr = ei.blk + index - ei.fofs;
3229 } else {
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003230 /* hole case */
Chao Yu4d57b862018-05-30 00:20:41 +08003231 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
Jaegeuk Kim4da7bf52016-04-06 11:27:03 -07003232 if (err || dn.data_blkaddr == NULL_ADDR) {
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003233 f2fs_put_dnode(&dn);
Chao Yu0ef81832020-06-18 14:36:22 +08003234 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
Yunlei He59c90812017-03-13 20:22:18 +08003235 true);
Sheng Yong2866fb12018-11-14 19:34:28 +08003236 WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003237 locked = true;
3238 goto restart;
3239 }
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003240 }
3241 }
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003242
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003243 /* convert_inline_page can make node_changed */
3244 *blk_addr = dn.data_blkaddr;
3245 *node_changed = dn.node_changed;
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003246out:
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003247 f2fs_put_dnode(&dn);
3248unlock_out:
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003249 if (locked)
Chao Yu0ef81832020-06-18 14:36:22 +08003250 f2fs_do_map_lock(sbi, flag, false);
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003251 return err;
3252}
3253
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003254static int f2fs_write_begin(struct file *file, struct address_space *mapping,
3255 loff_t pos, unsigned len, unsigned flags,
3256 struct page **pagep, void **fsdata)
3257{
3258 struct inode *inode = mapping->host;
Jaegeuk Kim40813632014-09-02 15:31:18 -07003259 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kim86531d62015-07-15 13:08:21 -07003260 struct page *page = NULL;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003261 pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
Chao Yua2e2e762018-01-15 17:16:46 +08003262 bool need_balance = false, drop_atomic = false;
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003263 block_t blkaddr = NULL_ADDR;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003264 int err = 0;
3265
Chao Yu62aed042014-05-06 16:46:04 +08003266 trace_f2fs_write_begin(inode, pos, len, flags);
3267
Chao Yu00e09c02019-08-23 17:58:36 +08003268 if (!f2fs_is_checkpoint_ready(sbi)) {
3269 err = -ENOSPC;
Daniel Rosenberg43549942018-08-20 19:21:43 -07003270 goto fail;
Chao Yu00e09c02019-08-23 17:58:36 +08003271 }
Daniel Rosenberg43549942018-08-20 19:21:43 -07003272
Jaegeuk Kim455e3a52018-07-27 18:15:11 +09003273 if ((f2fs_is_atomic_file(inode) &&
3274 !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
3275 is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
Jaegeuk Kim57864ae2017-10-18 19:05:57 -07003276 err = -ENOMEM;
Chao Yua2e2e762018-01-15 17:16:46 +08003277 drop_atomic = true;
Jaegeuk Kim57864ae2017-10-18 19:05:57 -07003278 goto fail;
3279 }
3280
Jaegeuk Kim5f727392014-11-25 10:59:45 -08003281 /*
3282 * We should check this at this moment to avoid deadlock on inode page
3283 * and #0 page. The locking rule for inline_data conversion should be:
3284 * lock_page(page #0) -> lock_page(inode_page)
3285 */
3286 if (index != 0) {
3287 err = f2fs_convert_inline_inode(inode);
3288 if (err)
3289 goto fail;
3290 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003291
3292#ifdef CONFIG_F2FS_FS_COMPRESSION
3293 if (f2fs_compressed_file(inode)) {
3294 int ret;
3295
3296 *fsdata = NULL;
3297
3298 ret = f2fs_prepare_compress_overwrite(inode, pagep,
3299 index, fsdata);
3300 if (ret < 0) {
3301 err = ret;
3302 goto fail;
3303 } else if (ret) {
3304 return 0;
3305 }
3306 }
3307#endif
3308
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09003309repeat:
Jaegeuk Kim86d54792017-02-17 09:55:55 -08003310 /*
3311	 * Do not use grab_cache_page_write_begin() to avoid a deadlock due to
3312	 * wait_for_stable_page. We will wait for that below with our IO control.
3313 */
Chao Yu01eccef2017-10-28 16:52:30 +08003314 page = f2fs_pagecache_get_page(mapping, index,
Jaegeuk Kim86d54792017-02-17 09:55:55 -08003315 FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
Chao Yu3aab8f82014-07-02 13:25:04 +08003316 if (!page) {
3317 err = -ENOMEM;
3318 goto fail;
3319 }
Jaegeuk Kimd5f66992014-04-30 09:22:45 +09003320
Chao Yu4c8ff702019-11-01 18:07:14 +08003321 /* TODO: cluster can be compressed due to race with .writepage */
3322
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003323 *pagep = page;
3324
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003325 err = prepare_write_begin(sbi, page, pos, len,
3326 &blkaddr, &need_balance);
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07003327 if (err)
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003328 goto fail;
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07003329
Chao Yuaf033b22018-09-20 20:05:00 +08003330 if (need_balance && !IS_NOQUOTA(inode) &&
3331 has_not_enough_free_secs(sbi, 0, 0)) {
Jaegeuk Kim2a340762015-12-22 13:23:35 -08003332 unlock_page(page);
Jaegeuk Kim2c4db1a2016-01-07 14:15:04 -08003333 f2fs_balance_fs(sbi, true);
Jaegeuk Kim2a340762015-12-22 13:23:35 -08003334 lock_page(page);
3335 if (page->mapping != mapping) {
3336 /* The page got truncated from under us */
3337 f2fs_put_page(page, 1);
3338 goto repeat;
3339 }
3340 }
3341
Chao Yubae0ee72018-12-25 17:43:42 +08003342 f2fs_wait_on_page_writeback(page, DATA, false, true);
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07003343
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07003344 if (len == PAGE_SIZE || PageUptodate(page))
3345 return 0;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003346
Eric Biggers95ae2512019-07-22 09:26:24 -07003347 if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
3348 !f2fs_verity_in_progress(inode)) {
Yunlei He746e2402016-12-20 11:11:35 +08003349 zero_user_segment(page, len, PAGE_SIZE);
3350 return 0;
3351 }
3352
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003353 if (blkaddr == NEW_ADDR) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003354 zero_user_segment(page, 0, PAGE_SIZE);
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07003355 SetPageUptodate(page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003356 } else {
Chao Yu93770ab2019-04-15 15:26:32 +08003357 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3358 DATA_GENERIC_ENHANCE_READ)) {
Chao Yu10f966b2019-06-20 11:36:14 +08003359 err = -EFSCORRUPTED;
Chao Yu93770ab2019-04-15 15:26:32 +08003360 goto fail;
3361 }
Jia Yangb7973092020-07-01 10:27:40 +08003362 err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
Jaegeuk Kim13ba41e2017-09-06 21:04:44 -07003363 if (err)
Chao Yu3aab8f82014-07-02 13:25:04 +08003364 goto fail;
Chao Yud54c7952014-03-29 15:30:40 +08003365
Jaegeuk Kim393ff912013-03-08 21:29:23 +09003366 lock_page(page);
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09003367 if (unlikely(page->mapping != mapping)) {
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09003368 f2fs_put_page(page, 1);
3369 goto repeat;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003370 }
Chao Yu1563ac72016-07-03 22:05:12 +08003371 if (unlikely(!PageUptodate(page))) {
3372 err = -EIO;
3373 goto fail;
Jaegeuk Kim4375a332015-04-23 12:04:33 -07003374 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003375 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003376 return 0;
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07003377
Chao Yu3aab8f82014-07-02 13:25:04 +08003378fail:
Jaegeuk Kim86531d62015-07-15 13:08:21 -07003379 f2fs_put_page(page, 1);
Chao Yu3aab8f82014-07-02 13:25:04 +08003380 f2fs_write_failed(mapping, pos + len);
Chao Yua2e2e762018-01-15 17:16:46 +08003381 if (drop_atomic)
Chao Yu4d57b862018-05-30 00:20:41 +08003382 f2fs_drop_inmem_pages_all(sbi, false);
Chao Yu3aab8f82014-07-02 13:25:04 +08003383 return err;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003384}
3385
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09003386static int f2fs_write_end(struct file *file,
3387 struct address_space *mapping,
3388 loff_t pos, unsigned len, unsigned copied,
3389 struct page *page, void *fsdata)
3390{
3391 struct inode *inode = page->mapping->host;
3392
Chao Yudfb2bf32014-05-06 16:47:23 +08003393 trace_f2fs_write_end(inode, pos, len, copied);
3394
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07003395 /*
3396	 * This should come from len == PAGE_SIZE, and we expect copied
3397	 * to be PAGE_SIZE. Otherwise, we treat it as zero copied and
3398	 * let generic_perform_write() try to copy data again with copied=0.
3399 */
3400 if (!PageUptodate(page)) {
Yunlei He746e2402016-12-20 11:11:35 +08003401 if (unlikely(copied != len))
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07003402 copied = 0;
3403 else
3404 SetPageUptodate(page);
3405 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003406
3407#ifdef CONFIG_F2FS_FS_COMPRESSION
3408 /* overwrite compressed file */
3409 if (f2fs_compressed_file(inode) && fsdata) {
3410 f2fs_compress_write_end(inode, fsdata, page->index, copied);
3411 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
Chao Yu944dd22e2020-07-24 18:21:36 +08003412
3413 if (pos + copied > i_size_read(inode) &&
3414 !f2fs_verity_in_progress(inode))
3415 f2fs_i_size_write(inode, pos + copied);
Chao Yu4c8ff702019-11-01 18:07:14 +08003416 return copied;
3417 }
3418#endif
3419
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07003420 if (!copied)
3421 goto unlock_out;
3422
Jaegeuk Kim34ba94b2014-10-09 13:19:53 -07003423 set_page_dirty(page);
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09003424
Eric Biggers95ae2512019-07-22 09:26:24 -07003425 if (pos + copied > i_size_read(inode) &&
3426 !f2fs_verity_in_progress(inode))
Jaegeuk Kimfc9581c2016-05-20 09:22:03 -07003427 f2fs_i_size_write(inode, pos + copied);
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07003428unlock_out:
Chao Yu3024c9a2016-08-06 21:09:41 +08003429 f2fs_put_page(page, 1);
Jaegeuk Kimd0239e12016-01-08 16:57:48 -08003430 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09003431 return copied;
3432}
3433
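/*
 * Return 0 when the request is block-aligned and may go direct, 1 when
 * it must fall back to buffered I/O (a read entirely beyond i_size, or
 * an offset aligned only to the bdev's logical block size), or -EINVAL
 * when the alignment is unusable.
 */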
Omar Sandoval6f673762015-03-16 04:33:52 -07003434static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
3435 loff_t offset)
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09003436{
Jaegeuk Kim8a56dd92018-06-29 18:55:12 -07003437 unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
3438 unsigned blkbits = i_blkbits;
3439 unsigned blocksize_mask = (1 << blkbits) - 1;
3440 unsigned long align = offset | iov_iter_alignment(iter);
3441 struct block_device *bdev = inode->i_sb->s_bdev;
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09003442
Gabriel Krisman Bertazi20d0a102020-08-19 16:07:31 -04003443 if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
3444 return 1;
3445
Jaegeuk Kim8a56dd92018-06-29 18:55:12 -07003446 if (align & blocksize_mask) {
3447 if (bdev)
3448 blkbits = blksize_bits(bdev_logical_block_size(bdev));
3449 blocksize_mask = (1 << blkbits) - 1;
3450 if (align & blocksize_mask)
3451 return -EINVAL;
3452 return 1;
3453 }
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09003454 return 0;
3455}
3456
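/*
 * DIO completion wrapper: restore the original bi_private/bi_end_io
 * saved in f2fs_private_dio and drop the in-flight F2FS_DIO_READ/WRITE
 * counter that f2fs_dio_submit_bio() raised at submission time.
 */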
Chao Yu02b16d02018-11-12 00:46:46 +08003457static void f2fs_dio_end_io(struct bio *bio)
3458{
3459 struct f2fs_private_dio *dio = bio->bi_private;
3460
3461 dec_page_count(F2FS_I_SB(dio->inode),
3462 dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3463
3464 bio->bi_private = dio->orig_private;
3465 bio->bi_end_io = dio->orig_end_io;
3466
Chao Yuc8eb7022020-09-14 16:47:00 +08003467 kfree(dio);
Chao Yu02b16d02018-11-12 00:46:46 +08003468
3469 bio_endio(bio);
3470}
3471
3472static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
3473 loff_t file_offset)
3474{
3475 struct f2fs_private_dio *dio;
3476 bool write = (bio_op(bio) == REQ_OP_WRITE);
Chao Yu02b16d02018-11-12 00:46:46 +08003477
3478 dio = f2fs_kzalloc(F2FS_I_SB(inode),
3479 sizeof(struct f2fs_private_dio), GFP_NOFS);
YueHaibing8e114032019-01-04 01:38:29 +00003480 if (!dio)
Chao Yu02b16d02018-11-12 00:46:46 +08003481 goto out;
Chao Yu02b16d02018-11-12 00:46:46 +08003482
3483 dio->inode = inode;
3484 dio->orig_end_io = bio->bi_end_io;
3485 dio->orig_private = bio->bi_private;
3486 dio->write = write;
3487
3488 bio->bi_end_io = f2fs_dio_end_io;
3489 bio->bi_private = dio;
3490
3491 inc_page_count(F2FS_I_SB(inode),
3492 write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3493
3494 submit_bio(bio);
3495 return;
3496out:
3497 bio->bi_status = BLK_STS_IOERR;
3498 bio_endio(bio);
3499}
3500
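/*
 * Direct I/O entry point. The GC rwsem ordering used below, roughly:
 *
 *	down_read(&fi->i_gc_rwsem[rw]);
 *	if (do_opu)
 *		down_read(&fi->i_gc_rwsem[READ]);
 *	__blockdev_direct_IO(...);
 *	if (do_opu)
 *		up_read(&fi->i_gc_rwsem[READ]);
 *	up_read(&fi->i_gc_rwsem[rw]);
 *
 * With IOCB_NOWAIT the locks are only trylocked and -EAGAIN is returned
 * on contention. Failed writes truncate the preallocated blocks via
 * f2fs_write_failed().
 */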
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07003501static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003502{
Jaegeuk Kimb439b102016-02-03 13:09:09 -08003503 struct address_space *mapping = iocb->ki_filp->f_mapping;
Chao Yu3aab8f82014-07-02 13:25:04 +08003504 struct inode *inode = mapping->host;
Hyunchul Lee0cdd3192018-01-31 11:36:57 +09003505 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Chao Yuf847c692018-09-27 18:34:52 +08003506 struct f2fs_inode_info *fi = F2FS_I(inode);
Chao Yu3aab8f82014-07-02 13:25:04 +08003507 size_t count = iov_iter_count(iter);
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07003508 loff_t offset = iocb->ki_pos;
Chao Yu82e0a5a2016-07-13 09:18:29 +08003509 int rw = iov_iter_rw(iter);
Chao Yu3aab8f82014-07-02 13:25:04 +08003510 int err;
Hyunchul Lee0cdd3192018-01-31 11:36:57 +09003511 enum rw_hint hint = iocb->ki_hint;
Chao Yu63189b72018-03-08 14:22:56 +08003512 int whint_mode = F2FS_OPTION(sbi).whint_mode;
Chao Yuf847c692018-09-27 18:34:52 +08003513 bool do_opu;
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09003514
Jaegeuk Kimb439b102016-02-03 13:09:09 -08003515 err = check_direct_IO(inode, iter, offset);
Jaegeuk Kimb9d777b2015-12-22 11:09:35 -08003516 if (err)
Jaegeuk Kim8a56dd92018-06-29 18:55:12 -07003517 return err < 0 ? err : 0;
Huajun Li9ffe0fb2013-11-10 23:13:20 +08003518
Chao Yuf847c692018-09-27 18:34:52 +08003519 if (f2fs_force_buffered_io(inode, iocb, iter))
Jaegeuk Kim36abef42016-06-03 19:29:38 -07003520 return 0;
Jaegeuk Kimfcc85a42015-04-21 20:39:58 -07003521
Chao Yuf847c692018-09-27 18:34:52 +08003522 do_opu = allow_outplace_dio(inode, iocb, iter);
3523
Jaegeuk Kim5302fb02016-07-22 15:25:47 -07003524 trace_f2fs_direct_IO_enter(inode, offset, count, rw);
Chao Yu70407fa2014-07-31 21:11:22 +08003525
Hyunchul Lee0cdd3192018-01-31 11:36:57 +09003526 if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
3527 iocb->ki_hint = WRITE_LIFE_NOT_SET;
3528
Chao Yuf847c692018-09-27 18:34:52 +08003529 if (iocb->ki_flags & IOCB_NOWAIT) {
3530 if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
Hyunchul Leeb91050a2018-03-08 19:34:38 +09003531 iocb->ki_hint = hint;
3532 err = -EAGAIN;
3533 goto out;
3534 }
Chao Yuf847c692018-09-27 18:34:52 +08003535 if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
3536 up_read(&fi->i_gc_rwsem[rw]);
3537 iocb->ki_hint = hint;
3538 err = -EAGAIN;
3539 goto out;
3540 }
3541 } else {
3542 down_read(&fi->i_gc_rwsem[rw]);
3543 if (do_opu)
3544 down_read(&fi->i_gc_rwsem[READ]);
Hyunchul Leeb91050a2018-03-08 19:34:38 +09003545 }
3546
Chao Yu02b16d02018-11-12 00:46:46 +08003547 err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
Chao Yuf9d6d052018-11-13 14:33:45 +08003548 iter, rw == WRITE ? get_data_block_dio_write :
3549 get_data_block_dio, NULL, f2fs_dio_submit_bio,
DongDongJuad8d6a02020-03-20 15:01:32 +09003550 rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
3551 DIO_SKIP_HOLES);
Chao Yuf847c692018-09-27 18:34:52 +08003552
3553 if (do_opu)
3554 up_read(&fi->i_gc_rwsem[READ]);
3555
3556 up_read(&fi->i_gc_rwsem[rw]);
Chao Yu82e0a5a2016-07-13 09:18:29 +08003557
3558 if (rw == WRITE) {
Hyunchul Lee0cdd3192018-01-31 11:36:57 +09003559 if (whint_mode == WHINT_MODE_OFF)
3560 iocb->ki_hint = hint;
Chao Yub0af6d42017-08-02 23:21:48 +08003561 if (err > 0) {
3562 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3563 err);
Chao Yuf847c692018-09-27 18:34:52 +08003564 if (!do_opu)
3565 set_inode_flag(inode, FI_UPDATE_WRITE);
Jack Qiu335cac82020-08-31 09:58:02 +08003566 } else if (err == -EIOCBQUEUED) {
3567 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3568 count - iov_iter_count(iter));
Chao Yub0af6d42017-08-02 23:21:48 +08003569 } else if (err < 0) {
Jaegeuk Kim6bfc4912016-04-18 17:07:44 -04003570 f2fs_write_failed(mapping, offset + count);
Chao Yub0af6d42017-08-02 23:21:48 +08003571 }
Chao Yu8b83ac82020-04-16 18:16:56 +08003572 } else {
3573 if (err > 0)
3574 f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
Jack Qiu335cac82020-08-31 09:58:02 +08003575 else if (err == -EIOCBQUEUED)
3576 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_READ_IO,
3577 count - iov_iter_count(iter));
Jaegeuk Kim6bfc4912016-04-18 17:07:44 -04003578 }
Chao Yu70407fa2014-07-31 21:11:22 +08003579
Hyunchul Leeb91050a2018-03-08 19:34:38 +09003580out:
Jaegeuk Kim5302fb02016-07-22 15:25:47 -07003581 trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
Chao Yu70407fa2014-07-31 21:11:22 +08003582
Chao Yu3aab8f82014-07-02 13:25:04 +08003583 return err;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003584}
3585
Chao Yu487261f2015-02-05 17:44:29 +08003586void f2fs_invalidate_page(struct page *page, unsigned int offset,
3587 unsigned int length)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003588{
3589 struct inode *inode = page->mapping->host;
Chao Yu487261f2015-02-05 17:44:29 +08003590 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07003591
Chao Yu487261f2015-02-05 17:44:29 +08003592 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003593 (offset % PAGE_SIZE || length != PAGE_SIZE))
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07003594 return;
3595
Chao Yu487261f2015-02-05 17:44:29 +08003596 if (PageDirty(page)) {
Chao Yu933439c2016-10-11 22:57:01 +08003597 if (inode->i_ino == F2FS_META_INO(sbi)) {
Chao Yu487261f2015-02-05 17:44:29 +08003598 dec_page_count(sbi, F2FS_DIRTY_META);
Chao Yu933439c2016-10-11 22:57:01 +08003599 } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
Chao Yu487261f2015-02-05 17:44:29 +08003600 dec_page_count(sbi, F2FS_DIRTY_NODES);
Chao Yu933439c2016-10-11 22:57:01 +08003601 } else {
Chao Yu487261f2015-02-05 17:44:29 +08003602 inode_dec_dirty_pages(inode);
Chao Yu4d57b862018-05-30 00:20:41 +08003603 f2fs_remove_dirty_inode(inode);
Chao Yu933439c2016-10-11 22:57:01 +08003604 }
Chao Yu487261f2015-02-05 17:44:29 +08003605 }
Chao Yudecd36b2015-08-07 18:42:09 +08003606
Chao Yu2baf0782018-07-27 18:15:16 +08003607 clear_cold_data(page);
3608
Chao Yudecd36b2015-08-07 18:42:09 +08003609 if (IS_ATOMIC_WRITTEN_PAGE(page))
Chao Yu4d57b862018-05-30 00:20:41 +08003610 return f2fs_drop_inmem_page(inode, page);
Chao Yudecd36b2015-08-07 18:42:09 +08003611
Chao Yu240a5912019-03-06 17:30:59 +08003612 f2fs_clear_page_private(page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003613}
3614
Chao Yu487261f2015-02-05 17:44:29 +08003615int f2fs_release_page(struct page *page, gfp_t wait)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003616{
Jaegeuk Kimf68daee2015-01-30 11:39:08 -08003617 /* If this is dirty page, keep PagePrivate */
3618 if (PageDirty(page))
3619 return 0;
3620
Chao Yudecd36b2015-08-07 18:42:09 +08003621 /* This is atomic written page, keep Private */
3622 if (IS_ATOMIC_WRITTEN_PAGE(page))
3623 return 0;
3624
Chao Yu2baf0782018-07-27 18:15:16 +08003625 clear_cold_data(page);
Chao Yu240a5912019-03-06 17:30:59 +08003626 f2fs_clear_page_private(page);
Jaegeuk Kimc3850aa2013-03-14 09:24:32 +09003627 return 1;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003628}
3629
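/*
 * set_page_dirty hook: pages of an atomic file that is not committing
 * are registered on the inmem list instead of being made dirty the
 * usual way; everything else goes through __set_page_dirty_nobuffers()
 * plus f2fs's own dirty-inode tracking.
 */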
3630static int f2fs_set_data_page_dirty(struct page *page)
3631{
Jaegeuk Kim4969c062019-07-01 19:15:29 -07003632 struct inode *inode = page_file_mapping(page)->host;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003633
Jaegeuk Kim26c6b882013-10-24 17:53:29 +09003634 trace_f2fs_set_page_dirty(page, DATA);
3635
Jaegeuk Kim237c0792016-06-30 18:49:15 -07003636 if (!PageUptodate(page))
3637 SetPageUptodate(page);
Jaegeuk Kim4969c062019-07-01 19:15:29 -07003638 if (PageSwapCache(page))
3639 return __set_page_dirty_nobuffers(page);
Jaegeuk Kim34ba94b2014-10-09 13:19:53 -07003640
Chao Yu5fe45742017-01-07 18:50:26 +08003641 if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
Chao Yudecd36b2015-08-07 18:42:09 +08003642 if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
Chao Yu4d57b862018-05-30 00:20:41 +08003643 f2fs_register_inmem_page(inode, page);
Chao Yudecd36b2015-08-07 18:42:09 +08003644 return 1;
3645 }
3646 /*
3647 * Previously, this page has been registered, we just
3648 * return here.
3649 */
3650 return 0;
Jaegeuk Kim34ba94b2014-10-09 13:19:53 -07003651 }
3652
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003653 if (!PageDirty(page)) {
Jaegeuk Kimb87078a2018-04-20 19:29:52 -07003654 __set_page_dirty_nobuffers(page);
Chao Yu4d57b862018-05-30 00:20:41 +08003655 f2fs_update_dirty_page(inode, page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003656 return 1;
3657 }
3658 return 0;
3659}
3660
Chao Yuc1c63382020-03-30 17:13:29 +08003661
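/*
 * bmap helper for compressed files: only blocks of a cluster that is
 * stored uncompressed (no COMPRESS_ADDR marker) have a stable physical
 * address; blocks inside a compressed cluster report 0.
 */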
3662static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
3663{
3664#ifdef CONFIG_F2FS_FS_COMPRESSION
3665 struct dnode_of_data dn;
3666 sector_t start_idx, blknr = 0;
3667 int ret;
3668
3669 start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
3670
3671 set_new_dnode(&dn, inode, NULL, NULL, 0);
3672 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
3673 if (ret)
3674 return 0;
3675
3676 if (dn.data_blkaddr != COMPRESS_ADDR) {
3677 dn.ofs_in_node += block - start_idx;
3678 blknr = f2fs_data_blkaddr(&dn);
3679 if (!__is_valid_data_blkaddr(blknr))
3680 blknr = 0;
3681 }
3682
3683 f2fs_put_dnode(&dn);
Chao Yuc1c63382020-03-30 17:13:29 +08003684 return blknr;
3685#else
Chao Yu250e84d2020-06-28 20:29:38 +08003686 return 0;
Chao Yuc1c63382020-03-30 17:13:29 +08003687#endif
3688}
3689
3690
Jaegeuk Kimc01e54b2013-01-17 20:30:23 +09003691static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
3692{
Chao Yu454ae7e2014-04-22 13:34:01 +08003693 struct inode *inode = mapping->host;
Chao Yub79b0a32020-06-29 20:13:12 +08003694 sector_t blknr = 0;
Chao Yu454ae7e2014-04-22 13:34:01 +08003695
Jaegeuk Kim1d373a02015-10-19 10:29:51 -07003696 if (f2fs_has_inline_data(inode))
Chao Yub79b0a32020-06-29 20:13:12 +08003697 goto out;
Jaegeuk Kim1d373a02015-10-19 10:29:51 -07003698
3699 /* make sure allocating whole blocks */
3700 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
3701 filemap_write_and_wait(mapping);
3702
Daeho Jeong4eda1682020-08-31 09:24:01 +09003703	/* the block number must be within the F2FS max block count */
Chengguang Xu6d1451b2021-01-13 13:21:54 +08003704 if (unlikely(block >= max_file_blocks(inode)))
Daeho Jeong4eda1682020-08-31 09:24:01 +09003705 goto out;
Chao Yuc1c63382020-03-30 17:13:29 +08003706
Daeho Jeong4eda1682020-08-31 09:24:01 +09003707 if (f2fs_compressed_file(inode)) {
3708 blknr = f2fs_bmap_compress(inode, block);
3709 } else {
Jaegeuk Kimb876f4c2020-11-24 15:19:10 -08003710 struct f2fs_map_blocks map;
3711
3712 memset(&map, 0, sizeof(map));
3713 map.m_lblk = block;
3714 map.m_len = 1;
3715 map.m_next_pgofs = NULL;
3716 map.m_seg_type = NO_CHECK_TYPE;
3717
3718 if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP))
3719 blknr = map.m_pblk;
Daeho Jeong4eda1682020-08-31 09:24:01 +09003720 }
Chao Yub79b0a32020-06-29 20:13:12 +08003721out:
3722 trace_f2fs_bmap(inode, block, blknr);
3723 return blknr;
Chao Yu429511c2015-02-05 17:54:31 +08003724}
3725
Weichao Guo5b7a4872016-09-20 05:03:27 +08003726#ifdef CONFIG_MIGRATION
3727#include <linux/migrate.h>
3728
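/*
 * Page migration hook. An atomic (inmem) page holds one extra reference
 * and sits on fi->inmem_pages, so migration takes inmem_lock, passes the
 * extra refcount to migrate_page_move_mapping(), and repoints the list
 * entry at the new page.
 */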
3729int f2fs_migrate_page(struct address_space *mapping,
3730 struct page *newpage, struct page *page, enum migrate_mode mode)
3731{
3732 int rc, extra_count;
3733 struct f2fs_inode_info *fi = F2FS_I(mapping->host);
3734 bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
3735
3736 BUG_ON(PageWriteback(page));
3737
3738	/* migrating an atomic written page is safe with the inmem_lock held */
Jaegeuk Kimff1048e2017-07-06 14:46:01 -07003739 if (atomic_written) {
3740 if (mode != MIGRATE_SYNC)
3741 return -EBUSY;
3742 if (!mutex_trylock(&fi->inmem_lock))
3743 return -EAGAIN;
3744 }
Weichao Guo5b7a4872016-09-20 05:03:27 +08003745
Chao Yu240a5912019-03-06 17:30:59 +08003746	/* one extra reference is held for an atomic_write page */
3747 extra_count = atomic_written ? 1 : 0;
Weichao Guo5b7a4872016-09-20 05:03:27 +08003748 rc = migrate_page_move_mapping(mapping, newpage,
Keith Busch37109692019-07-18 15:58:46 -07003749 page, extra_count);
Weichao Guo5b7a4872016-09-20 05:03:27 +08003750 if (rc != MIGRATEPAGE_SUCCESS) {
3751 if (atomic_written)
3752 mutex_unlock(&fi->inmem_lock);
3753 return rc;
3754 }
3755
3756 if (atomic_written) {
3757 struct inmem_pages *cur;
3758 list_for_each_entry(cur, &fi->inmem_pages, list)
3759 if (cur->page == page) {
3760 cur->page = newpage;
3761 break;
3762 }
3763 mutex_unlock(&fi->inmem_lock);
3764 put_page(page);
3765 get_page(newpage);
3766 }
3767
Chao Yu240a5912019-03-06 17:30:59 +08003768 if (PagePrivate(page)) {
3769 f2fs_set_page_private(newpage, page_private(page));
3770 f2fs_clear_page_private(page);
3771 }
Weichao Guo5b7a4872016-09-20 05:03:27 +08003772
Jérôme Glisse2916ecc2017-09-08 16:12:06 -07003773 if (mode != MIGRATE_SYNC_NO_COPY)
3774 migrate_page_copy(newpage, page);
3775 else
3776 migrate_page_states(newpage, page);
Weichao Guo5b7a4872016-09-20 05:03:27 +08003777
3778 return MIGRATEPAGE_SUCCESS;
3779}
3780#endif
3781
Jaegeuk Kim4969c062019-07-01 19:15:29 -07003782#ifdef CONFIG_SWAP
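/*
 * Fast swapon check, used when one page maps exactly one f2fs block
 * (PAGE_SIZE == F2FS_BLKSIZE): walk the file with f2fs_map_blocks() and
 * turn every physically contiguous run into a single swap extent. Any
 * hole makes the file unusable as swap.
 */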
static int check_swap_activate_fast(struct swap_info_struct *sis,
				struct file *swap_file, sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	sector_t cur_lblock;
	sector_t last_lblock;
	sector_t pblock;
	sector_t lowest_pblock = -1;
	sector_t highest_pblock = 0;
	int nr_extents = 0;
	unsigned long nr_pblocks;
	u64 len;
	int ret;

	/*
	 * Map all the blocks into the extent list. This code doesn't try
	 * to be very smart.
	 */
	cur_lblock = 0;
	last_lblock = bytes_to_blks(inode, i_size_read(inode));
	len = i_size_read(inode);

	while (cur_lblock <= last_lblock && cur_lblock < sis->max) {
		struct f2fs_map_blocks map;
		pgoff_t next_pgofs;

		cond_resched();

		memset(&map, 0, sizeof(map));
		map.m_lblk = cur_lblock;
		map.m_len = bytes_to_blks(inode, len) - cur_lblock;
		map.m_next_pgofs = &next_pgofs;
		map.m_seg_type = NO_CHECK_TYPE;

		ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
		if (ret)
			goto err_out;

		/* hole */
		if (!(map.m_flags & F2FS_MAP_FLAGS))
			goto err_out;

		pblock = map.m_pblk;
		nr_pblocks = map.m_len;

		if (cur_lblock + nr_pblocks >= sis->max)
			nr_pblocks = sis->max - cur_lblock;

		if (cur_lblock) {	/* exclude the header page */
			if (pblock < lowest_pblock)
				lowest_pblock = pblock;
			if (pblock + nr_pblocks - 1 > highest_pblock)
				highest_pblock = pblock + nr_pblocks - 1;
		}

		/* found a contiguous run of page-sized blocks */
		ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		cur_lblock += nr_pblocks;
	}
	ret = nr_extents;
	*span = 1 + highest_pblock - lowest_pblock;
	if (cur_lblock == 0)
		cur_lblock = 1;	/* force Empty message */
	sis->max = cur_lblock;
	sis->pages = cur_lblock - 1;
	sis->highest_bit = cur_lblock - 1;
out:
	return ret;
err_out:
	pr_err("swapon: swapfile has holes\n");
	return -EINVAL;
}

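/*
 * Slow path for PAGE_SIZE > F2FS_BLKSIZE: each swap page then spans
 * several on-disk blocks, which must be contiguous and page-aligned.
 * For example, with 64K pages and 4K blocks, blocks_per_page is 16, so a
 * probe only succeeds when 16 consecutive logical blocks map to 16
 * consecutive physical blocks starting at a 16-block-aligned address.
 */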
/* Copied from generic_swapfile_activate() to check any holes */
static int check_swap_activate(struct swap_info_struct *sis,
				struct file *swap_file, sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	if (PAGE_SIZE == F2FS_BLKSIZE)
		return check_swap_activate_fast(sis, swap_file, span);

	blocks_per_page = bytes_to_blks(inode, PAGE_SIZE);

	/*
	 * Map all the blocks into the extent list. This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = bytes_to_blks(inode, i_size_read(inode));
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;
		sector_t block = 0;
		int err = 0;

		cond_resched();

		block = probe_block;
		err = bmap(inode, &block);
		if (err || !block)
			goto bad_bmap;
		first_block = block;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {

			block = probe_block + block_in_page;
			err = bmap(inode, &block);

			if (err || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - inode->i_blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	return -EINVAL;
}

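/*
 * swap_activate() address_space operation, called from swapon(2). Before
 * the extent list is built the file is made swap-safe: inline data is
 * converted out, compression is disabled for the inode, extents are
 * precached, and on success the inode is pinned so its blocks cannot be
 * relocated (e.g. by garbage collection) while in use as swap.
 */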
static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
				sector_t *span)
{
	struct inode *inode = file_inode(file);
	int ret;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	if (!f2fs_disable_compressed_file(inode))
		return -EINVAL;

	f2fs_precache_extents(inode);

	ret = check_swap_activate(sis, file, span);
	if (ret < 0)
		return ret;

	set_inode_flag(inode, FI_PIN_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}

static void f2fs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	clear_inode_flag(inode, FI_PIN_FILE);
}
#else
static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
				sector_t *span)
{
	return -EOPNOTSUPP;
}

static void f2fs_swap_deactivate(struct file *file)
{
}
#endif

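/*
 * address_space operations wired up for regular data pages; the individual
 * implementations live above and elsewhere in this file.
 */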
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readahead	= f2fs_readahead,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
	.swap_activate	= f2fs_swap_activate,
	.swap_deactivate = f2fs_swap_deactivate,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};

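/*
 * Clear the PAGECACHE_TAG_DIRTY xarray tag for a page without touching the
 * page itself, so tag-based writeback lookups on the mapping no longer
 * find it.
 */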
void f2fs_clear_page_cache_dirty_tag(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	__xa_clear_mark(&mapping->i_pages, page_index(page),
						PAGECACHE_TAG_DIRTY);
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}

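/*
 * Post-read processing contexts carry per-bio state for work done after a
 * read completes (decryption, verity verification, decompression). The
 * mempool guarantees forward progress when the slab allocator is under
 * memory pressure.
 */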
int __init f2fs_init_post_read_processing(void)
{
	bio_post_read_ctx_cache =
		kmem_cache_create("f2fs_bio_post_read_ctx",
				sizeof(struct bio_post_read_ctx), 0, 0, NULL);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void f2fs_destroy_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}

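/*
 * The post-read workqueue is only needed when a feature that defers work
 * to process context (encryption, verity, compression) is enabled on the
 * superblock; otherwise reads complete in the bio end_io path and no
 * workqueue is allocated.
 */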
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
{
	if (!f2fs_sb_has_encrypt(sbi) &&
		!f2fs_sb_has_verity(sbi) &&
		!f2fs_sb_has_compression(sbi))
		return 0;

	sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!sbi->post_read_wq)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
{
	if (sbi->post_read_wq)
		destroy_workqueue(sbi->post_read_wq);
}

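/*
 * Slab for struct bio_entry objects, which track bios held in the bio
 * cache so that adjacent writes can be merged before submission.
 */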
int __init f2fs_init_bio_entry_cache(void)
{
	bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
			sizeof(struct bio_entry));
	if (!bio_entry_slab)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_bio_entry_cache(void)
{
	kmem_cache_destroy(bio_entry_slab);
}