// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
#include <linux/sched/signal.h>
#include <linux/fileattr.h>
#include <linux/fadvise.h>
#include <linux/iomap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>

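/*
 * Read fault handler: fall back to the generic filemap_fault() and, on
 * success, account one mapped-read block for iostat.
 */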
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	ret = filemap_fault(vmf);
	if (!ret)
		f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
							F2FS_BLKSIZE);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

	return ret;
}

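/*
 * Write fault handler: make the faulting page writable by allocating
 * its backing block (or looking it up for a compressed cluster),
 * waiting out any writeback, and zeroing the part of the page beyond
 * EOF before dirtying it.
 */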
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;
	int err = 0;

	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		return VM_FAULT_SIGBUS;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto err;
	}

	err = f2fs_convert_inline_inode(inode);
	if (err)
		goto err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < 0) {
			err = ret;
			goto err;
		} else if (ret) {
			need_alloc = false;
		}
	}
#endif
	/* should be done outside of any locked page */
	if (need_alloc)
		f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	filemap_invalidate_lock_shared(inode->i_mapping);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	if (need_alloc) {
		/* block allocation */
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_block(&dn, page->index);
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (!need_alloc) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
		f2fs_put_dnode(&dn);
	}
#endif
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	filemap_invalidate_unlock_shared(inode->i_mapping);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	/*
	 * Make sure to get the non-deleted alias.  The alias associated with
	 * the open file descriptor being fsync()'ed may be deleted already.
	 */
	dentry = d_find_alias(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

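/*
 * Decide whether fsync() on this inode must fall back to a full
 * checkpoint instead of roll-forward recovery; returns the reason,
 * or CP_NO_NEEDED when node-chain recovery is sufficient.
 */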
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* we still need fsync if there are pending inode page updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

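/*
 * Refresh a stale parent inode number (i_pino) from a live dentry for
 * a single-linked inode, so later fsyncs can rely on it.
 */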
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}

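/*
 * Back end of fsync()/fdatasync(): write back dirty pages, then either
 * issue a full checkpoint (when need_do_checkpoint() finds a reason) or
 * persist the inode's node chain plus, if required, a flush command, so
 * the file survives sudden power-off via roll-forward recovery.
 */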
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	} else {
		/*
		 * In the OPU case, during fsync() the node can be persisted
		 * before the data when the lower device doesn't support write
		 * barriers, resulting in data corruption after SPO.
		 * So for strict fsync mode, force atomic write semantics to
		 * keep the write order between data/node and the last node,
		 * to avoid potential data corruption.
		 */
		if (F2FS_OPTION(sbi).fsync_mode ==
				FSYNC_MODE_STRICT && !atomic)
			atomic = true;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() are able to be recovered from
	 * sudden power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. The following
		 * pino will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's an atomic_write, write ordering is already kept, so we
	 * don't need to wait for node write completion here, since we use
	 * a node chain which serializes node blocks. If one of the node
	 * writes is reordered, we can simply see a broken chain, which stops
	 * roll-forward recovery. It means we'll recover all or none of the
	 * node blocks given the fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}

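/*
 * Helper for SEEK_DATA/SEEK_HOLE: decide whether the block address at
 * @index ends the scan.  An unwritten (NEW_ADDR) block counts as data
 * only when the page cache holds a dirty page for it.
 */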
static bool __found_offset(struct address_space *mapping, block_t blkaddr,
				pgoff_t index, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if (__is_valid_data_blkaddr(blkaddr))
			return true;
		if (blkaddr == NEW_ADDR &&
		    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

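/*
 * Implement SEEK_DATA/SEEK_HOLE by walking the direct node blocks from
 * @offset and testing each block address with __found_offset().
 */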
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode)) {
		if (whence == SEEK_HOLE) {
			data_ofs = isize;
			goto found;
		} else if (whence == SEEK_DATA) {
			data_ofs = offset;
			goto found;
		}
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = f2fs_data_blkaddr(&dn);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(file->f_mapping, blkaddr,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	if (f2fs_compressed_file(inode))
		maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	set_inode_flag(inode, FI_MMAP_FILE);
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = fsverity_file_open(inode, filp);
	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}

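/*
 * Invalidate up to @count block addresses in the given dnode, tracking
 * per-cluster valid blocks for compressed files and shrinking the
 * extent cache and the inode's valid-block count to match.
 */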
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts with cluster */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);
			valid_blocks = 0;
		}

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))
				continue;
			if (compressed_cluster)
				valid_blocks++;
		}

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);

		if (!released || blkaddr != COMPRESS_ADDR)
			nr_free++;
	}

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}

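/*
 * Zero the tail of the page containing @from; with @cache_only, only
 * touch pages that are already up to date in the page cache.
 */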
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

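/*
 * Free every block past @from: truncate inline data if present, free
 * the tail of the dnode straddling @from, drop all node blocks beyond
 * it, and finally zero out the partial last page.
 */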
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= max_file_blocks(inode))
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

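/*
 * Wrapper that rounds @from up to a cluster boundary for compressed
 * files before freeing blocks, then truncates the partial cluster.
 */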
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	u64 free_from = from;
	int err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * for compressed file, only support cluster size
	 * aligned truncation.
	 */
	if (f2fs_compressed_file(inode))
		free_from = round_up(from,
				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
#endif

	err = f2fs_do_truncate_blocks(inode, free_from, lock);
	if (err)
		return err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * For a compressed file, direct write is not allowed after its
	 * compressed blocks have been released, but it should be allowed
	 * again once the file is truncated to zero.
	 */
	if (f2fs_compressed_file(inode) && !free_from
			&& is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		clear_inode_flag(inode, FI_COMPRESS_RELEASED);

	if (from != free_from) {
		err = f2fs_truncate_partial_cluster(inode, from, lock);
		if (err)
			return err;
	}
#endif

	return 0;
}

int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
		return -EIO;
	}

	err = f2fs_dquot_initialize(inode);
	if (err)
		return err;

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		 struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(&init_user_ns, inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct user_namespace *mnt_userns,
			   struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;
		kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);

		if (!in_group_p(kgid) && !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

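/*
 * Apply attribute changes: after the fscrypt/fsverity/quota checks,
 * transfer quota for uid/gid changes under lock_op(), handle size
 * changes with truncate under gc_rwsem and the invalidate lock, then
 * copy the remaining attributes and chmod ACLs if the mode changed.
 */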
int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	if (unlikely(IS_APPEND(inode) &&
			(attr->ia_valid & (ATTR_MODE | ATTR_UID |
				  ATTR_GID | ATTR_TIMES_SET))))
		return -EPERM;

	if ((attr->ia_valid & ATTR_SIZE) &&
		!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = setattr_prepare(&init_user_ns, dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	err = fsverity_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = f2fs_dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * should convert inline inode before i_size_write to
			 * keep smaller than inline_data size with inline flag.
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
		}

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		filemap_invalidate_lock(inode->i_mapping);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		filemap_invalidate_unlock(inode->i_mapping);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		spin_lock(&F2FS_I(inode)->i_size_lock);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		spin_unlock(&F2FS_I(inode)->i_size_lock);
	}

	__setattr_copy(&init_user_ns, inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(&init_user_ns, inode, f2fs_get_inode_mode(inode));

		if (is_inode_flag_set(inode, FI_ACL_MODE)) {
			if (!err)
				inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
	.listxattr	= f2fs_listxattr,
	.fiemap		= f2fs_fiemap,
	.fileattr_get	= f2fs_fileattr_get,
	.fileattr_set	= f2fs_fileattr_set,
};

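/*
 * Zero @len bytes at @start within page @index, allocating the data
 * page if it does not exist yet.
 */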
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

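/*
 * Free all block addresses in [@pg_start, @pg_end); dnodes that do not
 * exist are skipped.
 */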
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

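/*
 * FALLOC_FL_PUNCH_HOLE backend: zero the partial head and tail pages
 * and free every whole page in between under gc_rwsem and the
 * invalidate lock.
 */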
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			filemap_invalidate_lock(inode->i_mapping);

			truncate_pagecache_range(inode, blk_start, blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			filemap_invalidate_unlock(inode->i_mapping);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}

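/*
 * Record the block addresses of @len blocks starting at @off in
 * @blkaddr, detaching non-checkpointed blocks from their dnode (the
 * corresponding @do_replace slot is set) so they can be re-linked
 * elsewhere or rolled back.
 */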
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001132static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1133 int *do_replace, pgoff_t off, pgoff_t len)
Chao Yub4ace332015-05-06 13:09:46 +08001134{
1135 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1136 struct dnode_of_data dn;
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001137 int ret, done, i;
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001138
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001139next_dnode:
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001140 set_new_dnode(&dn, inode, NULL, NULL, 0);
Chao Yu4d57b862018-05-30 00:20:41 +08001141 ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001142 if (ret && ret != -ENOENT) {
1143 return ret;
1144 } else if (ret == -ENOENT) {
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001145 if (dn.max_level == 0)
1146 return -ENOENT;
Chao Yu4c8ff702019-11-01 18:07:14 +08001147 done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1148 dn.ofs_in_node, len);
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001149 blkaddr += done;
1150 do_replace += done;
1151 goto next;
1152 }
1153
1154 done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1155 dn.ofs_in_node, len);
1156 for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
Chao Yua2ced1c2020-02-14 17:44:10 +08001157 *blkaddr = f2fs_data_blkaddr(&dn);
Chao Yu93770ab2019-04-15 15:26:32 +08001158
1159 if (__is_valid_data_blkaddr(*blkaddr) &&
1160 !f2fs_is_valid_blkaddr(sbi, *blkaddr,
1161 DATA_GENERIC_ENHANCE)) {
1162 f2fs_put_dnode(&dn);
Chao Yu10f966b2019-06-20 11:36:14 +08001163 return -EFSCORRUPTED;
Chao Yu93770ab2019-04-15 15:26:32 +08001164 }
1165
Chao Yu4d57b862018-05-30 00:20:41 +08001166 if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001167
Chao Yub0332a02020-02-14 17:44:12 +08001168 if (f2fs_lfs_mode(sbi)) {
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001169 f2fs_put_dnode(&dn);
Chao Yufd114ab2019-08-15 19:45:36 +08001170 return -EOPNOTSUPP;
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001171 }
1172
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001173 /* do not invalidate this block address */
Chao Yuf28b3432016-02-24 17:16:47 +08001174 f2fs_update_data_blkaddr(&dn, NULL_ADDR);
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001175 *do_replace = 1;
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001176 }
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001177 }
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001178 f2fs_put_dnode(&dn);
1179next:
1180 len -= done;
1181 off += done;
1182 if (len)
1183 goto next_dnode;
1184 return 0;
1185}
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001186
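/*
 * Undo __read_out_blkaddrs(): re-attach each detached block address.
 * If the dnode can no longer be looked up, the saved block is
 * invalidated and the block count corrected so nothing leaks.
 */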
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001187static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1188 int *do_replace, pgoff_t off, int len)
1189{
1190 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1191 struct dnode_of_data dn;
1192 int ret, i;
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001193
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001194 for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1195 if (*do_replace == 0)
1196 continue;
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001197
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001198 set_new_dnode(&dn, inode, NULL, NULL, 0);
Chao Yu4d57b862018-05-30 00:20:41 +08001199 ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001200 if (ret) {
1201 dec_valid_block_count(sbi, inode, 1);
Chao Yu4d57b862018-05-30 00:20:41 +08001202 f2fs_invalidate_blocks(sbi, *blkaddr);
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001203 } else {
1204 f2fs_update_data_blkaddr(&dn, *blkaddr);
Jaegeuk Kim36abef42016-06-03 19:29:38 -07001205 }
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001206 f2fs_put_dnode(&dn);
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001207 }
1208 return 0;
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001209}
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001210
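/*
 * Move @len blocks from @src in src_inode to @dst in dst_inode.  Blocks
 * detached by __read_out_blkaddrs() are rewired into the destination
 * dnode with f2fs_replace_block(); checkpointed blocks are copied
 * through the page cache and then punched out of the source.
 */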
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001211static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1212 block_t *blkaddr, int *do_replace,
1213 pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1214{
1215 struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1216 pgoff_t i = 0;
1217 int ret;
1218
1219 while (i < len) {
1220 if (blkaddr[i] == NULL_ADDR && !full) {
1221 i++;
1222 continue;
1223 }
1224
1225 if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1226 struct dnode_of_data dn;
1227 struct node_info ni;
1228 size_t new_size;
1229 pgoff_t ilen;
1230
1231 set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
Chao Yu4d57b862018-05-30 00:20:41 +08001232 ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001233 if (ret)
1234 return ret;
1235
Jaegeuk Kima9419b62021-12-13 14:16:32 -08001236 ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
Chao Yu77357302018-07-17 00:02:17 +08001237 if (ret) {
1238 f2fs_put_dnode(&dn);
1239 return ret;
1240 }
1241
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001242 ilen = min((pgoff_t)
1243 ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1244 dn.ofs_in_node, len - i);
1245 do {
Chao Yua2ced1c2020-02-14 17:44:10 +08001246 dn.data_blkaddr = f2fs_data_blkaddr(&dn);
Chao Yu4d57b862018-05-30 00:20:41 +08001247 f2fs_truncate_data_blocks_range(&dn, 1);
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001248
1249 if (do_replace[i]) {
1250 f2fs_i_blocks_write(src_inode,
Chao Yu0abd6752017-07-09 00:13:07 +08001251 1, false, false);
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001252 f2fs_i_blocks_write(dst_inode,
Chao Yu0abd6752017-07-09 00:13:07 +08001253 1, true, false);
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001254 f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1255 blkaddr[i], ni.version, true, false);
1256
1257 do_replace[i] = 0;
1258 }
1259 dn.ofs_in_node++;
1260 i++;
Chao Yu1f0d5c92019-11-07 17:29:00 +08001261 new_size = (loff_t)(dst + i) << PAGE_SHIFT;
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001262 if (dst_inode->i_size < new_size)
1263 f2fs_i_size_write(dst_inode, new_size);
Jaegeuk Kime87f7322016-11-23 10:51:17 -08001264 } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001265
1266 f2fs_put_dnode(&dn);
1267 } else {
1268 struct page *psrc, *pdst;
1269
Chao Yu4d57b862018-05-30 00:20:41 +08001270 psrc = f2fs_get_lock_data_page(src_inode,
1271 src + i, true);
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001272 if (IS_ERR(psrc))
1273 return PTR_ERR(psrc);
Chao Yu4d57b862018-05-30 00:20:41 +08001274 pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001275 true);
1276 if (IS_ERR(pdst)) {
1277 f2fs_put_page(psrc, 1);
1278 return PTR_ERR(pdst);
1279 }
1280 f2fs_copy_page(psrc, pdst);
1281 set_page_dirty(pdst);
1282 f2fs_put_page(pdst, 1);
1283 f2fs_put_page(psrc, 1);
1284
Chao Yu4d57b862018-05-30 00:20:41 +08001285 ret = f2fs_truncate_hole(src_inode,
1286 src + i, src + i + 1);
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001287 if (ret)
1288 return ret;
1289 i++;
1290 }
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001291 }
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001292 return 0;
1293}
1294
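/*
 * Top-level block mover: processes the range in chunks of at most
 * 4 * ADDRS_PER_BLOCK() entries so the temporary blkaddr/do_replace
 * arrays stay small, rolling the current chunk back if any step fails.
 */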
1295static int __exchange_data_block(struct inode *src_inode,
1296 struct inode *dst_inode, pgoff_t src, pgoff_t dst,
Jaegeuk Kim363cad7f2016-07-16 21:59:22 -07001297 pgoff_t len, bool full)
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001298{
1299 block_t *src_blkaddr;
1300 int *do_replace;
Jaegeuk Kim363cad7f2016-07-16 21:59:22 -07001301 pgoff_t olen;
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001302 int ret;
1303
Jaegeuk Kim363cad7f2016-07-16 21:59:22 -07001304 while (len) {
Chao Yud02a6e62019-03-25 21:08:19 +08001305 olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001306
Chao Yu628b3d12017-11-30 19:28:18 +08001307 src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
Kees Cook9d2a7892018-06-12 14:28:35 -07001308 array_size(olen, sizeof(block_t)),
Jaegeuk Kim4f4460c2019-12-03 19:02:15 -08001309 GFP_NOFS);
Jaegeuk Kim363cad7f2016-07-16 21:59:22 -07001310 if (!src_blkaddr)
1311 return -ENOMEM;
1312
Chao Yu628b3d12017-11-30 19:28:18 +08001313 do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
Kees Cook9d2a7892018-06-12 14:28:35 -07001314 array_size(olen, sizeof(int)),
Jaegeuk Kim4f4460c2019-12-03 19:02:15 -08001315 GFP_NOFS);
Jaegeuk Kim363cad7f2016-07-16 21:59:22 -07001316 if (!do_replace) {
1317 kvfree(src_blkaddr);
1318 return -ENOMEM;
1319 }
1320
1321 ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1322 do_replace, src, olen);
1323 if (ret)
1324 goto roll_back;
1325
1326 ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1327 do_replace, src, dst, olen, full);
1328 if (ret)
1329 goto roll_back;
1330
1331 src += olen;
1332 dst += olen;
1333 len -= olen;
1334
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001335 kvfree(src_blkaddr);
Jaegeuk Kim363cad7f2016-07-16 21:59:22 -07001336 kvfree(do_replace);
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001337 }
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001338 return 0;
1339
1340roll_back:
Chao Yu9fd62602018-05-28 23:47:19 +08001341 __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001342 kvfree(src_blkaddr);
1343 kvfree(do_replace);
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001344 return ret;
1345}
1346
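/*
 * Core of FALLOC_FL_COLLAPSE_RANGE: shift all blocks from @end down to
 * @start while holding i_gc_rwsem[WRITE] and the invalidate_lock, with
 * the extent tree dropped since every cached extent becomes stale.
 */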
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09001347static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001348{
1349 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Geert Uytterhoevenf91108b2019-06-20 16:42:08 +02001350 pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09001351 pgoff_t start = offset >> PAGE_SHIFT;
1352 pgoff_t end = (offset + len) >> PAGE_SHIFT;
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001353 int ret;
Chao Yub4ace332015-05-06 13:09:46 +08001354
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001355 f2fs_balance_fs(sbi, true);
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09001356
1357 /* avoid gc operation during block exchange */
1358 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Jan Karaedc6d012021-04-13 18:10:37 +02001359 filemap_invalidate_lock(inode->i_mapping);
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09001360
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001361 f2fs_lock_op(sbi);
Jaegeuk Kim5f281fa2016-07-12 11:07:52 -07001362 f2fs_drop_extent_tree(inode);
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09001363 truncate_pagecache(inode, offset);
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001364 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1365 f2fs_unlock_op(sbi);
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09001366
Jan Karaedc6d012021-04-13 18:10:37 +02001367 filemap_invalidate_unlock(inode->i_mapping);
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09001368 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Chao Yub4ace332015-05-06 13:09:46 +08001369 return ret;
1370}
1371
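/*
 * FALLOC_FL_COLLAPSE_RANGE entry point: offset and len must be block
 * aligned and end strictly below i_size.  After the shift, the file is
 * truncated by @len and i_size updated.
 */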
1372static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1373{
Chao Yub4ace332015-05-06 13:09:46 +08001374 loff_t new_size;
1375 int ret;
1376
Chao Yub4ace332015-05-06 13:09:46 +08001377 if (offset + len >= i_size_read(inode))
1378 return -EINVAL;
1379
1380 /* collapse range should be aligned to block size of f2fs. */
1381 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1382 return -EINVAL;
1383
Jaegeuk Kimb9d777b2015-12-22 11:09:35 -08001384 ret = f2fs_convert_inline_inode(inode);
1385 if (ret)
1386 return ret;
Jaegeuk Kim97a7b2c2015-06-17 13:59:05 -07001387
Chao Yub4ace332015-05-06 13:09:46 +08001388 /* write out all dirty pages from offset */
1389 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1390 if (ret)
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09001391 return ret;
Chao Yubb066642017-11-03 10:21:05 +08001392
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09001393 ret = f2fs_do_collapse(inode, offset, len);
Chao Yub4ace332015-05-06 13:09:46 +08001394 if (ret)
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09001395 return ret;
Chao Yub4ace332015-05-06 13:09:46 +08001396
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001397 /* write out all moved pages, if possible */
Jan Karaedc6d012021-04-13 18:10:37 +02001398 filemap_invalidate_lock(inode->i_mapping);
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001399 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1400 truncate_pagecache(inode, offset);
1401
Chao Yub4ace332015-05-06 13:09:46 +08001402 new_size = i_size_read(inode) - len;
Chao Yuc42d28c2019-02-02 17:33:01 +08001403 ret = f2fs_truncate_blocks(inode, new_size, true);
Jan Karaedc6d012021-04-13 18:10:37 +02001404 filemap_invalidate_unlock(inode->i_mapping);
Chao Yub4ace332015-05-06 13:09:46 +08001405 if (!ret)
Jaegeuk Kimfc9581c2016-05-20 09:22:03 -07001406 f2fs_i_size_write(inode, new_size);
Chao Yub4ace332015-05-06 13:09:46 +08001407 return ret;
1408}
1409
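/*
 * Within a single dnode, reserve a new block for every hole in
 * [start, end) and reset already-written blocks to NEW_ADDR so they
 * read back as zeroes; the extent cache for the range is invalidated
 * afterwards.
 */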
Chao Yu6e961942016-05-09 19:56:31 +08001410static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1411 pgoff_t end)
1412{
1413 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1414 pgoff_t index = start;
1415 unsigned int ofs_in_node = dn->ofs_in_node;
1416 blkcnt_t count = 0;
1417 int ret;
1418
1419 for (; index < end; index++, dn->ofs_in_node++) {
Chao Yua2ced1c2020-02-14 17:44:10 +08001420 if (f2fs_data_blkaddr(dn) == NULL_ADDR)
Chao Yu6e961942016-05-09 19:56:31 +08001421 count++;
1422 }
1423
1424 dn->ofs_in_node = ofs_in_node;
Chao Yu4d57b862018-05-30 00:20:41 +08001425 ret = f2fs_reserve_new_blocks(dn, count);
Chao Yu6e961942016-05-09 19:56:31 +08001426 if (ret)
1427 return ret;
1428
1429 dn->ofs_in_node = ofs_in_node;
1430 for (index = start; index < end; index++, dn->ofs_in_node++) {
Chao Yua2ced1c2020-02-14 17:44:10 +08001431 dn->data_blkaddr = f2fs_data_blkaddr(dn);
Chao Yu6e961942016-05-09 19:56:31 +08001432 /*
Chao Yu4d57b862018-05-30 00:20:41 +08001433 * f2fs_reserve_new_blocks does not guarantee that the
Chao Yu6e961942016-05-09 19:56:31 +08001434 * entire range gets allocated.
1435 */
1436 if (dn->data_blkaddr == NULL_ADDR) {
1437 ret = -ENOSPC;
1438 break;
1439 }
1440 if (dn->data_blkaddr != NEW_ADDR) {
Chao Yu4d57b862018-05-30 00:20:41 +08001441 f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
Chao Yu6e961942016-05-09 19:56:31 +08001442 dn->data_blkaddr = NEW_ADDR;
Chao Yu4d57b862018-05-30 00:20:41 +08001443 f2fs_set_data_blkaddr(dn);
Chao Yu6e961942016-05-09 19:56:31 +08001444 }
1445 }
1446
1447 f2fs_update_extent_cache_range(dn, start, 0, index - start);
1448
1449 return ret;
1450}
1451
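/*
 * FALLOC_FL_ZERO_RANGE: partial edge pages are zeroed through the page
 * cache with fill_zero(); fully covered pages are converted dnode by
 * dnode via f2fs_do_zero_range() under i_gc_rwsem[WRITE] and the
 * invalidate_lock.
 */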
Chao Yu75cd4e02015-05-06 13:11:13 +08001452static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1453 int mode)
1454{
1455 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1456 struct address_space *mapping = inode->i_mapping;
1457 pgoff_t index, pg_start, pg_end;
1458 loff_t new_size = i_size_read(inode);
1459 loff_t off_start, off_end;
1460 int ret = 0;
1461
Chao Yu75cd4e02015-05-06 13:11:13 +08001462 ret = inode_newsize_ok(inode, (len + offset));
1463 if (ret)
1464 return ret;
1465
Jaegeuk Kimb9d777b2015-12-22 11:09:35 -08001466 ret = f2fs_convert_inline_inode(inode);
1467 if (ret)
1468 return ret;
Chao Yu75cd4e02015-05-06 13:11:13 +08001469
1470 ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1471 if (ret)
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09001472 return ret;
Chao Yu75cd4e02015-05-06 13:11:13 +08001473
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001474 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1475 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
Chao Yu75cd4e02015-05-06 13:11:13 +08001476
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001477 off_start = offset & (PAGE_SIZE - 1);
1478 off_end = (offset + len) & (PAGE_SIZE - 1);
Chao Yu75cd4e02015-05-06 13:11:13 +08001479
1480 if (pg_start == pg_end) {
Chao Yu63943282015-08-07 18:36:06 +08001481 ret = fill_zero(inode, pg_start, off_start,
1482 off_end - off_start);
1483 if (ret)
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09001484 return ret;
Chao Yu63943282015-08-07 18:36:06 +08001485
Chao Yu75cd4e02015-05-06 13:11:13 +08001486 new_size = max_t(loff_t, new_size, offset + len);
1487 } else {
1488 if (off_start) {
Chao Yu63943282015-08-07 18:36:06 +08001489 ret = fill_zero(inode, pg_start++, off_start,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001490 PAGE_SIZE - off_start);
Chao Yu63943282015-08-07 18:36:06 +08001491 if (ret)
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09001492 return ret;
Chao Yu63943282015-08-07 18:36:06 +08001493
Chao Yu75cd4e02015-05-06 13:11:13 +08001494 new_size = max_t(loff_t, new_size,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001495 (loff_t)pg_start << PAGE_SHIFT);
Chao Yu75cd4e02015-05-06 13:11:13 +08001496 }
1497
Chao Yu6e961942016-05-09 19:56:31 +08001498 for (index = pg_start; index < pg_end;) {
Chao Yu75cd4e02015-05-06 13:11:13 +08001499 struct dnode_of_data dn;
Chao Yu6e961942016-05-09 19:56:31 +08001500 unsigned int end_offset;
1501 pgoff_t end;
Chao Yu75cd4e02015-05-06 13:11:13 +08001502
Chao Yuc70798532018-08-05 23:02:22 +08001503 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Jan Karaedc6d012021-04-13 18:10:37 +02001504 filemap_invalidate_lock(mapping);
Chao Yuc70798532018-08-05 23:02:22 +08001505
1506 truncate_pagecache_range(inode,
1507 (loff_t)index << PAGE_SHIFT,
1508 ((loff_t)pg_end << PAGE_SHIFT) - 1);
1509
Chao Yu75cd4e02015-05-06 13:11:13 +08001510 f2fs_lock_op(sbi);
1511
Chao Yu6e961942016-05-09 19:56:31 +08001512 set_new_dnode(&dn, inode, NULL, NULL, 0);
Chao Yu4d57b862018-05-30 00:20:41 +08001513 ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
Chao Yu75cd4e02015-05-06 13:11:13 +08001514 if (ret) {
1515 f2fs_unlock_op(sbi);
Jan Karaedc6d012021-04-13 18:10:37 +02001516 filemap_invalidate_unlock(mapping);
Chao Yuc70798532018-08-05 23:02:22 +08001517 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Chao Yu75cd4e02015-05-06 13:11:13 +08001518 goto out;
1519 }
1520
Chao Yu6e961942016-05-09 19:56:31 +08001521 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1522 end = min(pg_end, end_offset - dn.ofs_in_node + index);
1523
1524 ret = f2fs_do_zero_range(&dn, index, end);
Chao Yu75cd4e02015-05-06 13:11:13 +08001525 f2fs_put_dnode(&dn);
Chao Yuc70798532018-08-05 23:02:22 +08001526
Chao Yu75cd4e02015-05-06 13:11:13 +08001527 f2fs_unlock_op(sbi);
Jan Karaedc6d012021-04-13 18:10:37 +02001528 filemap_invalidate_unlock(mapping);
Chao Yuc70798532018-08-05 23:02:22 +08001529 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Chao Yu9434fcd2016-10-11 22:57:02 +08001530
1531 f2fs_balance_fs(sbi, dn.node_changed);
1532
Chao Yu6e961942016-05-09 19:56:31 +08001533 if (ret)
1534 goto out;
Chao Yu75cd4e02015-05-06 13:11:13 +08001535
Chao Yu6e961942016-05-09 19:56:31 +08001536 index = end;
Chao Yu75cd4e02015-05-06 13:11:13 +08001537 new_size = max_t(loff_t, new_size,
Chao Yu6e961942016-05-09 19:56:31 +08001538 (loff_t)index << PAGE_SHIFT);
Chao Yu75cd4e02015-05-06 13:11:13 +08001539 }
1540
1541 if (off_end) {
Chao Yu63943282015-08-07 18:36:06 +08001542 ret = fill_zero(inode, pg_end, 0, off_end);
1543 if (ret)
1544 goto out;
1545
Chao Yu75cd4e02015-05-06 13:11:13 +08001546 new_size = max_t(loff_t, new_size, offset + len);
1547 }
1548 }
1549
1550out:
Chao Yu17cd07a2018-02-25 23:38:21 +08001551 if (new_size > i_size_read(inode)) {
1552 if (mode & FALLOC_FL_KEEP_SIZE)
1553 file_set_keep_isize(inode);
1554 else
1555 f2fs_i_size_write(inode, new_size);
1556 }
Chao Yu75cd4e02015-05-06 13:11:13 +08001557 return ret;
1558}
1559
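/*
 * FALLOC_FL_INSERT_RANGE: open a block-aligned hole at @offset by
 * shifting every block at or above it up by @delta blocks, walking
 * down from EOF in chunks of at most @delta so source and destination
 * ranges never overlap, then grow i_size by @len.
 */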
Chao Yuf62185d2015-05-28 19:16:57 +08001560static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1561{
1562 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jan Karaedc6d012021-04-13 18:10:37 +02001563 struct address_space *mapping = inode->i_mapping;
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001564 pgoff_t nr, pg_start, pg_end, delta, idx;
Chao Yuf62185d2015-05-28 19:16:57 +08001565 loff_t new_size;
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001566 int ret = 0;
Chao Yuf62185d2015-05-28 19:16:57 +08001567
Chao Yuf62185d2015-05-28 19:16:57 +08001568 new_size = i_size_read(inode) + len;
Kinglong Mee46e82fb2017-03-10 17:54:52 +08001569 ret = inode_newsize_ok(inode, new_size);
1570 if (ret)
1571 return ret;
Chao Yuf62185d2015-05-28 19:16:57 +08001572
1573 if (offset >= i_size_read(inode))
1574 return -EINVAL;
1575
1576 /* insert range should be aligned to block size of f2fs. */
1577 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1578 return -EINVAL;
1579
Jaegeuk Kimb9d777b2015-12-22 11:09:35 -08001580 ret = f2fs_convert_inline_inode(inode);
1581 if (ret)
1582 return ret;
Jaegeuk Kim97a7b2c2015-06-17 13:59:05 -07001583
Jaegeuk Kim2c4db1a2016-01-07 14:15:04 -08001584 f2fs_balance_fs(sbi, true);
Jaegeuk Kim2a340762015-12-22 13:23:35 -08001585
Jan Karaedc6d012021-04-13 18:10:37 +02001586 filemap_invalidate_lock(mapping);
Chao Yuc42d28c2019-02-02 17:33:01 +08001587 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
Jan Karaedc6d012021-04-13 18:10:37 +02001588 filemap_invalidate_unlock(mapping);
Chao Yuf62185d2015-05-28 19:16:57 +08001589 if (ret)
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09001590 return ret;
Chao Yuf62185d2015-05-28 19:16:57 +08001591
1592 /* write out all dirty pages from offset */
Jan Karaedc6d012021-04-13 18:10:37 +02001593 ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
Chao Yuf62185d2015-05-28 19:16:57 +08001594 if (ret)
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09001595 return ret;
Chao Yuf62185d2015-05-28 19:16:57 +08001596
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001597 pg_start = offset >> PAGE_SHIFT;
1598 pg_end = (offset + len) >> PAGE_SHIFT;
Chao Yuf62185d2015-05-28 19:16:57 +08001599 delta = pg_end - pg_start;
Geert Uytterhoevenf91108b2019-06-20 16:42:08 +02001600 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
Chao Yuf62185d2015-05-28 19:16:57 +08001601
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09001602 /* avoid gc operation during block exchange */
1603 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Jan Karaedc6d012021-04-13 18:10:37 +02001604 filemap_invalidate_lock(mapping);
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09001605 truncate_pagecache(inode, offset);
1606
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001607 while (!ret && idx > pg_start) {
1608 nr = idx - pg_start;
1609 if (nr > delta)
1610 nr = delta;
1611 idx -= nr;
1612
Chao Yuf62185d2015-05-28 19:16:57 +08001613 f2fs_lock_op(sbi);
Jaegeuk Kim5f281fa2016-07-12 11:07:52 -07001614 f2fs_drop_extent_tree(inode);
1615
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001616 ret = __exchange_data_block(inode, inode, idx,
1617 idx + delta, nr, false);
Chao Yuf62185d2015-05-28 19:16:57 +08001618 f2fs_unlock_op(sbi);
1619 }
Jan Karaedc6d012021-04-13 18:10:37 +02001620 filemap_invalidate_unlock(mapping);
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09001621 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Chao Yuf62185d2015-05-28 19:16:57 +08001622
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001623 /* write out all moved pages, if possible */
Jan Karaedc6d012021-04-13 18:10:37 +02001624 filemap_invalidate_lock(mapping);
1625 filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001626 truncate_pagecache(inode, offset);
Jan Karaedc6d012021-04-13 18:10:37 +02001627 filemap_invalidate_unlock(mapping);
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001628
1629 if (!ret)
Jaegeuk Kimfc9581c2016-05-20 09:22:03 -07001630 f2fs_i_size_write(inode, new_size);
Chao Yuf62185d2015-05-28 19:16:57 +08001631 return ret;
1632}
1633
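/*
 * Preallocate blocks for plain fallocate().  Pinned files are allocated
 * section by section from CURSEG_COLD_DATA_PINNED, running GC first if
 * free sections are short; everything else goes through a single
 * f2fs_map_blocks() pass with F2FS_GET_BLOCK_PRE_AIO.
 */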
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001634static int expand_inode_data(struct inode *inode, loff_t offset,
1635 loff_t len, int mode)
1636{
Jaegeuk Kim40813632014-09-02 15:31:18 -07001637 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Hyunchul Leed5097be2017-11-28 09:23:00 +09001638 struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
Chao Yuf9d6d052018-11-13 14:33:45 +08001639 .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1640 .m_may_create = true };
Chao Yu88f2cfc2021-03-24 11:24:33 +08001641 pgoff_t pg_start, pg_end;
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001642 loff_t new_size = i_size_read(inode);
Jaegeuk Kime12dd7b2016-05-06 15:30:38 -07001643 loff_t off_end;
Chao Yu88f2cfc2021-03-24 11:24:33 +08001644 block_t expanded = 0;
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001645 int err;
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001646
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001647 err = inode_newsize_ok(inode, (len + offset));
1648 if (err)
1649 return err;
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001650
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001651 err = f2fs_convert_inline_inode(inode);
1652 if (err)
1653 return err;
Jaegeuk Kim9e09fc82013-12-27 12:28:59 +09001654
Jaegeuk Kim2c4db1a2016-01-07 14:15:04 -08001655 f2fs_balance_fs(sbi, true);
Jaegeuk Kim2a340762015-12-22 13:23:35 -08001656
Chao Yu88f2cfc2021-03-24 11:24:33 +08001657 pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
Jaegeuk Kime12dd7b2016-05-06 15:30:38 -07001658 pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001659 off_end = (offset + len) & (PAGE_SIZE - 1);
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001660
Chao Yu88f2cfc2021-03-24 11:24:33 +08001661 map.m_lblk = pg_start;
1662 map.m_len = pg_end - pg_start;
Jaegeuk Kime12dd7b2016-05-06 15:30:38 -07001663 if (off_end)
1664 map.m_len++;
Jaegeuk Kimead43272014-06-13 13:05:55 +09001665
Jaegeuk Kimf5a53ed2019-10-18 10:06:40 -07001666 if (!map.m_len)
1667 return 0;
Jaegeuk Kimcad38362019-06-26 18:23:05 -07001668
Jaegeuk Kimf5a53ed2019-10-18 10:06:40 -07001669 if (f2fs_is_pinned_file(inode)) {
Chao Yue1175f02021-03-05 17:56:01 +08001670 block_t sec_blks = BLKS_PER_SEC(sbi);
1671 block_t sec_len = roundup(map.m_len, sec_blks);
Jaegeuk Kimf5a53ed2019-10-18 10:06:40 -07001672
Chao Yue1175f02021-03-05 17:56:01 +08001673 map.m_len = sec_blks;
Jaegeuk Kimf5a53ed2019-10-18 10:06:40 -07001674next_alloc:
1675 if (has_not_enough_free_secs(sbi, 0,
1676 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
Chao Yufb24fea2020-01-14 19:36:50 +08001677 down_write(&sbi->gc_lock);
Chao Yu7dede8862021-02-20 17:35:40 +08001678 err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
Jaegeuk Kimf5a53ed2019-10-18 10:06:40 -07001679 if (err && err != -ENODATA && err != -EAGAIN)
1680 goto out_err;
1681 }
1682
1683 down_write(&sbi->pin_sem);
Daeho Jeongfd612642020-05-27 13:02:31 +09001684
1685 f2fs_lock_op(sbi);
Chao Yu509f1012021-04-21 09:54:55 +08001686 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
Daeho Jeongfd612642020-05-27 13:02:31 +09001687 f2fs_unlock_op(sbi);
1688
Chao Yud0b9e422020-08-04 21:14:45 +08001689 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
Jaegeuk Kimf5a53ed2019-10-18 10:06:40 -07001690 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
Jaegeuk Kimd4dd19e2021-11-12 14:31:16 -08001691 file_dont_truncate(inode);
Chao Yud0b9e422020-08-04 21:14:45 +08001692
Jaegeuk Kimf5a53ed2019-10-18 10:06:40 -07001693 up_write(&sbi->pin_sem);
1694
Chao Yu88f2cfc2021-03-24 11:24:33 +08001695 expanded += map.m_len;
Chao Yue1175f02021-03-05 17:56:01 +08001696 sec_len -= map.m_len;
Jaegeuk Kimf5a53ed2019-10-18 10:06:40 -07001697 map.m_lblk += map.m_len;
Chao Yue1175f02021-03-05 17:56:01 +08001698 if (!err && sec_len)
Jaegeuk Kimf5a53ed2019-10-18 10:06:40 -07001699 goto next_alloc;
1700
Chao Yu88f2cfc2021-03-24 11:24:33 +08001701 map.m_len = expanded;
Jaegeuk Kimf5a53ed2019-10-18 10:06:40 -07001702 } else {
1703 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
Chao Yu88f2cfc2021-03-24 11:24:33 +08001704 expanded = map.m_len;
Jaegeuk Kimf5a53ed2019-10-18 10:06:40 -07001705 }
1706out_err:
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001707 if (err) {
Jaegeuk Kime12dd7b2016-05-06 15:30:38 -07001708 pgoff_t last_off;
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001709
Chao Yu88f2cfc2021-03-24 11:24:33 +08001710 if (!expanded)
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001711 return err;
Jaegeuk Kim98397ff2014-06-13 13:07:31 +09001712
Chao Yu88f2cfc2021-03-24 11:24:33 +08001713 last_off = pg_start + expanded - 1;
Jaegeuk Kime12dd7b2016-05-06 15:30:38 -07001714
1715 /* update new size to the failed position */
youngjun yoo1061fd42018-05-30 04:34:58 +09001716 new_size = (last_off == pg_end) ? offset + len :
Jaegeuk Kime12dd7b2016-05-06 15:30:38 -07001717 (loff_t)(last_off + 1) << PAGE_SHIFT;
1718 } else {
1719 new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001720 }
1721
Chao Yue8ed90a2017-11-05 21:53:30 +08001722 if (new_size > i_size_read(inode)) {
1723 if (mode & FALLOC_FL_KEEP_SIZE)
1724 file_set_keep_isize(inode);
1725 else
1726 f2fs_i_size_write(inode, new_size);
1727 }
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001728
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001729 return err;
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001730}
1731
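/*
 * Dispatch the fallocate() modes supported by f2fs.  Encrypted files
 * reject collapse/insert, and compressed or pinned files only allow
 * plain preallocation, since partial truncation would free blocks that
 * applications may still reference.
 */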
1732static long f2fs_fallocate(struct file *file, int mode,
1733 loff_t offset, loff_t len)
1734{
Al Viro6131ffa2013-02-27 16:59:05 -05001735 struct inode *inode = file_inode(file);
Taehee Yoo587c0a42015-04-21 15:59:12 +09001736 long ret = 0;
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001737
Jaegeuk Kim1f227a32017-10-23 23:48:49 +02001738 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1739 return -EIO;
Chao Yu00e09c02019-08-23 17:58:36 +08001740 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1741 return -ENOSPC;
Chao Yu4c8ff702019-11-01 18:07:14 +08001742 if (!f2fs_is_compress_backend_ready(inode))
1743 return -EOPNOTSUPP;
Jaegeuk Kim1f227a32017-10-23 23:48:49 +02001744
Chao Yuc9980122015-09-11 14:39:02 +08001745 /* f2fs only supports ->fallocate for regular files */
1746 if (!S_ISREG(inode->i_mode))
1747 return -EINVAL;
1748
Chandan Rajendra62230e0d2018-12-12 15:20:11 +05301749 if (IS_ENCRYPTED(inode) &&
Chao Yuf62185d2015-05-28 19:16:57 +08001750 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
Jaegeuk Kimfcc85a42015-04-21 20:39:58 -07001751 return -EOPNOTSUPP;
1752
Jaegeuk Kim5fed0be2022-01-07 20:08:45 -08001753 /*
1754 * A pinned file should not support partial truncation, since its
1755 * blocks may be in use by applications.
1756 */
1757 if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
Chao Yu4c8ff702019-11-01 18:07:14 +08001758 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1759 FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1760 return -EOPNOTSUPP;
1761
Chao Yub4ace332015-05-06 13:09:46 +08001762 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
Chao Yuf62185d2015-05-28 19:16:57 +08001763 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1764 FALLOC_FL_INSERT_RANGE))
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001765 return -EOPNOTSUPP;
1766
Al Viro59551022016-01-22 15:40:57 -05001767 inode_lock(inode);
Chao Yu3375f692014-01-28 10:29:26 +08001768
Taehee Yoo587c0a42015-04-21 15:59:12 +09001769 if (mode & FALLOC_FL_PUNCH_HOLE) {
1770 if (offset >= inode->i_size)
1771 goto out;
1772
Chao Yua66c7b22013-11-22 16:52:50 +08001773 ret = punch_hole(inode, offset, len);
Chao Yub4ace332015-05-06 13:09:46 +08001774 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1775 ret = f2fs_collapse_range(inode, offset, len);
Chao Yu75cd4e02015-05-06 13:11:13 +08001776 } else if (mode & FALLOC_FL_ZERO_RANGE) {
1777 ret = f2fs_zero_range(inode, offset, len, mode);
Chao Yuf62185d2015-05-28 19:16:57 +08001778 } else if (mode & FALLOC_FL_INSERT_RANGE) {
1779 ret = f2fs_insert_range(inode, offset, len);
Chao Yub4ace332015-05-06 13:09:46 +08001780 } else {
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001781 ret = expand_inode_data(inode, offset, len, mode);
Chao Yub4ace332015-05-06 13:09:46 +08001782 }
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001783
Namjae Jeon3af60a42012-12-30 14:52:37 +09001784 if (!ret) {
Deepa Dinamani078cd822016-09-14 07:48:04 -07001785 inode->i_mtime = inode->i_ctime = current_time(inode);
Jaegeuk Kim7c457292016-10-14 11:51:23 -07001786 f2fs_mark_inode_dirty_sync(inode, false);
Jaegeuk Kimd0239e12016-01-08 16:57:48 -08001787 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
Namjae Jeon3af60a42012-12-30 14:52:37 +09001788 }
Chao Yu3375f692014-01-28 10:29:26 +08001789
Taehee Yoo587c0a42015-04-21 15:59:12 +09001790out:
Al Viro59551022016-01-22 15:40:57 -05001791 inode_unlock(inode);
Chao Yu3375f692014-01-28 10:29:26 +08001792
Namjae Jeonc01e2852013-04-23 17:00:52 +09001793 trace_f2fs_fallocate(inode, mode, offset, len, ret);
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001794 return ret;
1795}
1796
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001797static int f2fs_release_file(struct inode *inode, struct file *filp)
1798{
Jaegeuk Kimde5307e2016-04-11 11:51:51 -07001799 /*
1800 * f2fs_release_file is called at every close call, so we should
1801 * not drop any in-memory pages on a close issued by another process.
1802 */
1803 if (!(filp->f_mode & FMODE_WRITE) ||
1804 atomic_read(&inode->i_writecount) != 1)
1805 return 0;
1806
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001807 /* some remained atomic pages should discarded */
1808 if (f2fs_is_atomic_file(inode))
Chao Yu4d57b862018-05-30 00:20:41 +08001809 f2fs_drop_inmem_pages(inode);
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001810 if (f2fs_is_volatile_file(inode)) {
Jaegeuk Kim91942322016-05-20 10:13:22 -07001811 set_inode_flag(inode, FI_DROP_CACHE);
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001812 filemap_fdatawrite(inode->i_mapping);
Jaegeuk Kim91942322016-05-20 10:13:22 -07001813 clear_inode_flag(inode, FI_DROP_CACHE);
Chao Yudfa74282018-06-04 23:20:51 +08001814 clear_inode_flag(inode, FI_VOLATILE_FILE);
1815 stat_dec_volatile_write(inode);
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001816 }
1817 return 0;
1818}
1819
Jaegeuk Kim7a10f012017-07-24 19:46:29 -07001820static int f2fs_file_flush(struct file *file, fl_owner_t id)
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001821{
Jaegeuk Kim7a10f012017-07-24 19:46:29 -07001822 struct inode *inode = file_inode(file);
1823
1824 /*
1825 * If the process doing a transaction crashes, we should roll back.
1826 * Otherwise, other readers/writers can see a corrupted database until
1827 * all the writers close their files. Since this must be done before
1828 * dropping the file lock, it needs to happen in ->flush.
1829 */
1830 if (f2fs_is_atomic_file(inode) &&
1831 F2FS_I(inode)->inmem_task == current)
Chao Yu4d57b862018-05-30 00:20:41 +08001832 f2fs_drop_inmem_pages(inode);
Jaegeuk Kim7a10f012017-07-24 19:46:29 -07001833 return 0;
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001834}
1835
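/*
 * Apply the on-disk inode flags in @iflags under @mask, enforcing the
 * dependencies between them: casefold requires the feature and an
 * empty directory, and the compress/nocompress flags are mutually
 * exclusive.
 */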
Eric Biggers36098552019-06-04 22:59:04 -07001836static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
Chao Yu2c1d0302017-07-29 00:32:52 +08001837{
1838 struct f2fs_inode_info *fi = F2FS_I(inode);
Jaegeuk Kim99eabb92020-03-05 15:20:26 -08001839 u32 masked_flags = fi->i_flags & mask;
1840
Jaegeuk Kima7531032021-05-06 12:11:14 -07001841 /* mask can be shrunk by flags_valid selector */
1842 iflags &= mask;
Chao Yu2c1d0302017-07-29 00:32:52 +08001843
1844 /* Is it quota file? Do not allow user to mess with it */
1845 if (IS_NOQUOTA(inode))
1846 return -EPERM;
1847
Jaegeuk Kim99eabb92020-03-05 15:20:26 -08001848 if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
Daniel Rosenberg2c2eb7a2019-07-23 16:05:29 -07001849 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1850 return -EOPNOTSUPP;
1851 if (!f2fs_empty_dir(inode))
1852 return -ENOTEMPTY;
1853 }
1854
Chao Yu4c8ff702019-11-01 18:07:14 +08001855 if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1856 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1857 return -EOPNOTSUPP;
1858 if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1859 return -EINVAL;
1860 }
1861
Jaegeuk Kim99eabb92020-03-05 15:20:26 -08001862 if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
Chao Yuaa576972020-03-27 18:29:51 +08001863 if (masked_flags & F2FS_COMPR_FL) {
Daeho Jeong78134d02020-09-08 11:44:11 +09001864 if (!f2fs_disable_compressed_file(inode))
Chao Yu2536ac62020-03-10 20:50:09 +08001865 return -EINVAL;
1866 }
Chao Yu4c8ff702019-11-01 18:07:14 +08001867 if (iflags & F2FS_NOCOMP_FL)
1868 return -EINVAL;
1869 if (iflags & F2FS_COMPR_FL) {
Chao Yu4c8ff702019-11-01 18:07:14 +08001870 if (!f2fs_may_compress(inode))
1871 return -EINVAL;
Chao Yu519a5a22020-09-18 11:03:49 +08001872 if (S_ISREG(inode->i_mode) && inode->i_size)
1873 return -EINVAL;
Chao Yu4c8ff702019-11-01 18:07:14 +08001874
1875 set_compress_context(inode);
1876 }
1877 }
Jaegeuk Kim99eabb92020-03-05 15:20:26 -08001878 if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
1879 if (masked_flags & F2FS_COMPR_FL)
Chao Yu4c8ff702019-11-01 18:07:14 +08001880 return -EINVAL;
1881 }
1882
Eric Biggersd5e5efa2019-07-01 13:26:30 -07001883 fi->i_flags = iflags | (fi->i_flags & ~mask);
Chao Yu4c8ff702019-11-01 18:07:14 +08001884 f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1885 (fi->i_flags & F2FS_NOCOMP_FL));
Chao Yu2c1d0302017-07-29 00:32:52 +08001886
Chao Yu59c84402018-04-03 15:08:17 +08001887 if (fi->i_flags & F2FS_PROJINHERIT_FL)
Chao Yu2c1d0302017-07-29 00:32:52 +08001888 set_inode_flag(inode, FI_PROJ_INHERIT);
1889 else
1890 clear_inode_flag(inode, FI_PROJ_INHERIT);
1891
1892 inode->i_ctime = current_time(inode);
1893 f2fs_set_inode_flags(inode);
Chao Yub32e0192018-12-18 19:20:17 +08001894 f2fs_mark_inode_dirty_sync(inode, true);
Chao Yu2c1d0302017-07-29 00:32:52 +08001895 return 0;
1896}
1897
Miklos Szeredi9b1bb012021-04-07 14:36:43 +02001898/* FS_IOC_[GS]ETFLAGS and FS_IOC_FS[GS]ETXATTR support */
Eric Biggers36098552019-06-04 22:59:04 -07001899
1900/*
1901 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1902 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1903 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
1904 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
Miklos Szeredi9b1bb012021-04-07 14:36:43 +02001905 *
1906 * Translating flags to fsx_flags value used by FS_IOC_FSGETXATTR and
1907 * FS_IOC_FSSETXATTR is done by the VFS.
Eric Biggers36098552019-06-04 22:59:04 -07001908 */
1909
1910static const struct {
1911 u32 iflag;
1912 u32 fsflag;
1913} f2fs_fsflags_map[] = {
Chao Yu4c8ff702019-11-01 18:07:14 +08001914 { F2FS_COMPR_FL, FS_COMPR_FL },
Eric Biggers36098552019-06-04 22:59:04 -07001915 { F2FS_SYNC_FL, FS_SYNC_FL },
1916 { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL },
1917 { F2FS_APPEND_FL, FS_APPEND_FL },
1918 { F2FS_NODUMP_FL, FS_NODUMP_FL },
1919 { F2FS_NOATIME_FL, FS_NOATIME_FL },
Chao Yu4c8ff702019-11-01 18:07:14 +08001920 { F2FS_NOCOMP_FL, FS_NOCOMP_FL },
Eric Biggers36098552019-06-04 22:59:04 -07001921 { F2FS_INDEX_FL, FS_INDEX_FL },
1922 { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL },
1923 { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL },
Daniel Rosenberg2c2eb7a2019-07-23 16:05:29 -07001924 { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL },
Eric Biggers36098552019-06-04 22:59:04 -07001925};
1926
1927#define F2FS_GETTABLE_FS_FL ( \
Chao Yu4c8ff702019-11-01 18:07:14 +08001928 FS_COMPR_FL | \
Eric Biggers36098552019-06-04 22:59:04 -07001929 FS_SYNC_FL | \
1930 FS_IMMUTABLE_FL | \
1931 FS_APPEND_FL | \
1932 FS_NODUMP_FL | \
1933 FS_NOATIME_FL | \
Chao Yu4c8ff702019-11-01 18:07:14 +08001934 FS_NOCOMP_FL | \
Eric Biggers36098552019-06-04 22:59:04 -07001935 FS_INDEX_FL | \
1936 FS_DIRSYNC_FL | \
1937 FS_PROJINHERIT_FL | \
1938 FS_ENCRYPT_FL | \
1939 FS_INLINE_DATA_FL | \
Eric Biggers95ae2512019-07-22 09:26:24 -07001940 FS_NOCOW_FL | \
Linus Torvaldsfbc246a2019-09-21 14:26:33 -07001941 FS_VERITY_FL | \
Daniel Rosenberg2c2eb7a2019-07-23 16:05:29 -07001942 FS_CASEFOLD_FL)
Eric Biggers36098552019-06-04 22:59:04 -07001943
1944#define F2FS_SETTABLE_FS_FL ( \
Chao Yu4c8ff702019-11-01 18:07:14 +08001945 FS_COMPR_FL | \
Eric Biggers36098552019-06-04 22:59:04 -07001946 FS_SYNC_FL | \
1947 FS_IMMUTABLE_FL | \
1948 FS_APPEND_FL | \
1949 FS_NODUMP_FL | \
1950 FS_NOATIME_FL | \
Chao Yu4c8ff702019-11-01 18:07:14 +08001951 FS_NOCOMP_FL | \
Eric Biggers36098552019-06-04 22:59:04 -07001952 FS_DIRSYNC_FL | \
Daniel Rosenberg2c2eb7a2019-07-23 16:05:29 -07001953 FS_PROJINHERIT_FL | \
1954 FS_CASEFOLD_FL)
Eric Biggers36098552019-06-04 22:59:04 -07001955
1956/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1957static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1958{
1959 u32 fsflags = 0;
1960 int i;
1961
1962 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1963 if (iflags & f2fs_fsflags_map[i].iflag)
1964 fsflags |= f2fs_fsflags_map[i].fsflag;
1965
1966 return fsflags;
1967}
1968
1969/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1970static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1971{
1972 u32 iflags = 0;
1973 int i;
1974
1975 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1976 if (fsflags & f2fs_fsflags_map[i].fsflag)
1977 iflags |= f2fs_fsflags_map[i].iflag;
1978
1979 return iflags;
1980}
1981
Chao Yud49f3e82015-01-23 20:36:04 +08001982static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
1983{
1984 struct inode *inode = file_inode(filp);
1985
1986 return put_user(inode->i_generation, (int __user *)arg);
1987}
1988
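/*
 * F2FS_IOC_START_ATOMIC_WRITE: convert any inline data, write back
 * pages dirtied before the ioctl (so F2FS_WB_CP_DATA accounting stays
 * correct), put the inode on the per-sb atomic list, and set
 * FI_ATOMIC_FILE, all under i_gc_rwsem[WRITE].
 */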
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07001989static int f2fs_ioc_start_atomic_write(struct file *filp)
1990{
1991 struct inode *inode = file_inode(filp);
Jaegeuk Kim743b6202019-09-09 13:10:59 +01001992 struct f2fs_inode_info *fi = F2FS_I(inode);
1993 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Chao Yuf4c9c742015-07-17 18:06:35 +08001994 int ret;
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07001995
Christian Brauner21cb47b2021-01-21 14:19:25 +01001996 if (!inode_owner_or_capable(&init_user_ns, inode))
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07001997 return -EACCES;
1998
Jaegeuk Kime8118982017-03-17 10:04:15 +08001999 if (!S_ISREG(inode->i_mode))
2000 return -EINVAL;
2001
Chao Yu038d0692019-07-25 22:39:11 +08002002 if (filp->f_flags & O_DIRECT)
2003 return -EINVAL;
2004
Chao Yu7fb17fe2016-05-09 19:56:32 +08002005 ret = mnt_want_write_file(filp);
2006 if (ret)
2007 return ret;
2008
Chao Yu0fac5582016-05-09 19:56:33 +08002009 inode_lock(inode);
2010
Chao Yu4c8ff702019-11-01 18:07:14 +08002011 f2fs_disable_compressed_file(inode);
2012
Jaegeuk Kim455e3a52018-07-27 18:15:11 +09002013 if (f2fs_is_atomic_file(inode)) {
2014 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
2015 ret = -EINVAL;
Chao Yu7fb17fe2016-05-09 19:56:32 +08002016 goto out;
Jaegeuk Kim455e3a52018-07-27 18:15:11 +09002017 }
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07002018
Chao Yuf4c9c742015-07-17 18:06:35 +08002019 ret = f2fs_convert_inline_inode(inode);
2020 if (ret)
Chao Yu7fb17fe2016-05-09 19:56:32 +08002021 goto out;
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07002022
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09002023 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2024
Jaegeuk Kim31867b22018-12-28 11:00:38 -08002025 /*
2026 * Should wait for end_io so that F2FS_WB_CP_DATA is counted
2027 * correctly by f2fs_is_atomic_file.
2028 */
2029 if (get_dirty_pages(inode))
Joe Perchesdcbb4c12019-06-18 17:48:42 +08002030 f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2031 inode->i_ino, get_dirty_pages(inode));
Jaegeuk Kimc27753d2016-04-12 14:36:11 -07002032 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09002033 if (ret) {
2034 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Kinglong Mee684ca7e2017-03-18 09:20:55 +08002035 goto out;
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09002036 }
Jaegeuk Kim31867b22018-12-28 11:00:38 -08002037
Jaegeuk Kim743b6202019-09-09 13:10:59 +01002038 spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
2039 if (list_empty(&fi->inmem_ilist))
2040 list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
Sahitya Tummala677017d2019-11-13 16:01:03 +05302041 sbi->atomic_files++;
Jaegeuk Kim743b6202019-09-09 13:10:59 +01002042 spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
2043
2044 /* add the inode to inmem_list first, then set atomic_file */
Yunlei He054afda2018-04-18 11:06:39 +08002045 set_inode_flag(inode, FI_ATOMIC_FILE);
Chao Yu2ef79ec2018-05-07 20:28:54 +08002046 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09002047 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Kinglong Mee684ca7e2017-03-18 09:20:55 +08002048
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09002049 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
Jaegeuk Kim7a10f012017-07-24 19:46:29 -07002050 F2FS_I(inode)->inmem_task = current;
Jaegeuk Kim26a28a02016-12-28 13:55:09 -08002051 stat_update_max_atomic_write(inode);
Kinglong Mee684ca7e2017-03-18 09:20:55 +08002052out:
Chao Yu0fac5582016-05-09 19:56:33 +08002053 inode_unlock(inode);
Chao Yu7fb17fe2016-05-09 19:56:32 +08002054 mnt_drop_write_file(filp);
Jaegeuk Kimc27753d2016-04-12 14:36:11 -07002055 return ret;
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07002056}
2057
2058static int f2fs_ioc_commit_atomic_write(struct file *filp)
2059{
2060 struct inode *inode = file_inode(filp);
2061 int ret;
2062
Christian Brauner21cb47b2021-01-21 14:19:25 +01002063 if (!inode_owner_or_capable(&init_user_ns, inode))
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07002064 return -EACCES;
2065
2066 ret = mnt_want_write_file(filp);
2067 if (ret)
2068 return ret;
2069
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09002070 f2fs_balance_fs(F2FS_I_SB(inode), true);
Chao Yu0fac5582016-05-09 19:56:33 +08002071
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09002072 inode_lock(inode);
Chao Yu1dc0f892018-02-27 22:45:24 +08002073
Chao Yub169c3c2018-04-18 17:45:02 +08002074 if (f2fs_is_volatile_file(inode)) {
2075 ret = -EINVAL;
Chao Yu7fb17fe2016-05-09 19:56:32 +08002076 goto err_out;
Chao Yub169c3c2018-04-18 17:45:02 +08002077 }
Chao Yu7fb17fe2016-05-09 19:56:32 +08002078
Jaegeuk Kim6282adb2015-07-25 00:29:17 -07002079 if (f2fs_is_atomic_file(inode)) {
Chao Yu4d57b862018-05-30 00:20:41 +08002080 ret = f2fs_commit_inmem_pages(inode);
Chao Yu5fe45742017-01-07 18:50:26 +08002081 if (ret)
Jaegeuk Kimedb27de2015-07-25 00:52:52 -07002082 goto err_out;
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07002083
Jaegeuk Kim26a28a02016-12-28 13:55:09 -08002084 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
Jaegeuk Kim743b6202019-09-09 13:10:59 +01002085 if (!ret)
2086 f2fs_drop_inmem_pages(inode);
Jaegeuk Kim26a28a02016-12-28 13:55:09 -08002087 } else {
Chao Yu774e1b72017-08-23 18:23:25 +08002088 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07002089 }
Jaegeuk Kimedb27de2015-07-25 00:52:52 -07002090err_out:
Chao Yu2ef79ec2018-05-07 20:28:54 +08002091 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2092 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2093 ret = -EINVAL;
2094 }
Chao Yu0fac5582016-05-09 19:56:33 +08002095 inode_unlock(inode);
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07002096 mnt_drop_write_file(filp);
2097 return ret;
2098}
2099
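/*
 * Volatile writes keep dirty data as in-memory pages while
 * FI_VOLATILE_FILE is set.  Release truncates or hole-punches the
 * first block (presumably to invalidate a stale journal header), and
 * abort drops the cached pages outright.
 */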
Jaegeuk Kim02a13352014-10-06 16:11:16 -07002100static int f2fs_ioc_start_volatile_write(struct file *filp)
2101{
2102 struct inode *inode = file_inode(filp);
Chao Yuf4c9c742015-07-17 18:06:35 +08002103 int ret;
Jaegeuk Kim02a13352014-10-06 16:11:16 -07002104
Christian Brauner21cb47b2021-01-21 14:19:25 +01002105 if (!inode_owner_or_capable(&init_user_ns, inode))
Jaegeuk Kim02a13352014-10-06 16:11:16 -07002106 return -EACCES;
2107
Chao Yu8ff09712017-03-17 15:43:57 +08002108 if (!S_ISREG(inode->i_mode))
2109 return -EINVAL;
2110
Chao Yu7fb17fe2016-05-09 19:56:32 +08002111 ret = mnt_want_write_file(filp);
Chao Yuf4c9c742015-07-17 18:06:35 +08002112 if (ret)
2113 return ret;
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07002114
Chao Yu0fac5582016-05-09 19:56:33 +08002115 inode_lock(inode);
2116
Chao Yu7fb17fe2016-05-09 19:56:32 +08002117 if (f2fs_is_volatile_file(inode))
2118 goto out;
2119
2120 ret = f2fs_convert_inline_inode(inode);
2121 if (ret)
2122 goto out;
2123
Chao Yu648d50b2017-03-22 17:23:45 +08002124 stat_inc_volatile_write(inode);
2125 stat_update_max_volatile_write(inode);
2126
Jaegeuk Kim91942322016-05-20 10:13:22 -07002127 set_inode_flag(inode, FI_VOLATILE_FILE);
Jaegeuk Kimd0239e12016-01-08 16:57:48 -08002128 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
Chao Yu7fb17fe2016-05-09 19:56:32 +08002129out:
Chao Yu0fac5582016-05-09 19:56:33 +08002130 inode_unlock(inode);
Chao Yu7fb17fe2016-05-09 19:56:32 +08002131 mnt_drop_write_file(filp);
2132 return ret;
Jaegeuk Kim02a13352014-10-06 16:11:16 -07002133}
2134
Jaegeuk Kim1e843712014-12-09 06:08:59 -08002135static int f2fs_ioc_release_volatile_write(struct file *filp)
2136{
2137 struct inode *inode = file_inode(filp);
Chao Yu7fb17fe2016-05-09 19:56:32 +08002138 int ret;
Jaegeuk Kim1e843712014-12-09 06:08:59 -08002139
Christian Brauner21cb47b2021-01-21 14:19:25 +01002140 if (!inode_owner_or_capable(&init_user_ns, inode))
Jaegeuk Kim1e843712014-12-09 06:08:59 -08002141 return -EACCES;
2142
Chao Yu7fb17fe2016-05-09 19:56:32 +08002143 ret = mnt_want_write_file(filp);
2144 if (ret)
2145 return ret;
2146
Chao Yu0fac5582016-05-09 19:56:33 +08002147 inode_lock(inode);
2148
Jaegeuk Kim1e843712014-12-09 06:08:59 -08002149 if (!f2fs_is_volatile_file(inode))
Chao Yu7fb17fe2016-05-09 19:56:32 +08002150 goto out;
Jaegeuk Kim1e843712014-12-09 06:08:59 -08002151
Chao Yu7fb17fe2016-05-09 19:56:32 +08002152 if (!f2fs_is_first_block_written(inode)) {
2153 ret = truncate_partial_data_page(inode, 0, true);
2154 goto out;
2155 }
Jaegeuk Kim3c6c2be2015-03-17 17:16:35 -07002156
Chao Yu7fb17fe2016-05-09 19:56:32 +08002157 ret = punch_hole(inode, 0, F2FS_BLKSIZE);
2158out:
Chao Yu0fac5582016-05-09 19:56:33 +08002159 inode_unlock(inode);
Chao Yu7fb17fe2016-05-09 19:56:32 +08002160 mnt_drop_write_file(filp);
2161 return ret;
Jaegeuk Kim1e843712014-12-09 06:08:59 -08002162}
2163
2164static int f2fs_ioc_abort_volatile_write(struct file *filp)
2165{
2166 struct inode *inode = file_inode(filp);
2167 int ret;
2168
Christian Brauner21cb47b2021-01-21 14:19:25 +01002169 if (!inode_owner_or_capable(&init_user_ns, inode))
Jaegeuk Kim1e843712014-12-09 06:08:59 -08002170 return -EACCES;
2171
2172 ret = mnt_want_write_file(filp);
2173 if (ret)
2174 return ret;
2175
Chao Yu0fac5582016-05-09 19:56:33 +08002176 inode_lock(inode);
2177
Jaegeuk Kim26dc3d42016-04-11 13:15:10 -07002178 if (f2fs_is_atomic_file(inode))
Chao Yu4d57b862018-05-30 00:20:41 +08002179 f2fs_drop_inmem_pages(inode);
Jaegeuk Kim732d5642015-12-29 15:46:33 -08002180 if (f2fs_is_volatile_file(inode)) {
Jaegeuk Kim91942322016-05-20 10:13:22 -07002181 clear_inode_flag(inode, FI_VOLATILE_FILE);
Chao Yu648d50b2017-03-22 17:23:45 +08002182 stat_dec_volatile_write(inode);
Jaegeuk Kim608514d2016-04-15 09:43:17 -07002183 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
Jaegeuk Kim732d5642015-12-29 15:46:33 -08002184 }
Jaegeuk Kimde6a8ec2015-06-08 17:51:10 -07002185
Jaegeuk Kim455e3a52018-07-27 18:15:11 +09002186 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2187
Chao Yu0fac5582016-05-09 19:56:33 +08002188 inode_unlock(inode);
2189
Jaegeuk Kim1e843712014-12-09 06:08:59 -08002190 mnt_drop_write_file(filp);
Jaegeuk Kimd0239e12016-01-08 16:57:48 -08002191 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
Jaegeuk Kim1e843712014-12-09 06:08:59 -08002192 return ret;
2193}
2194
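/*
 * F2FS_IOC_SHUTDOWN: stop checkpointing with a caller-chosen degree of
 * syncing (freeze the bdev, checkpoint only, nothing, or flush meta
 * pages first).  F2FS_GOING_DOWN_NEED_FSCK instead marks the fs for
 * fsck and checkpoints; every other mode also stops the GC and discard
 * threads.
 */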
Jaegeuk Kim1abff932015-01-08 19:15:53 -08002195static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2196{
2197 struct inode *inode = file_inode(filp);
2198 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2199 struct super_block *sb = sbi->sb;
2200 __u32 in;
Dan Carpenter2a96d8a2018-06-20 13:39:53 +03002201 int ret = 0;
Jaegeuk Kim1abff932015-01-08 19:15:53 -08002202
2203 if (!capable(CAP_SYS_ADMIN))
2204 return -EPERM;
2205
2206 if (get_user(in, (__u32 __user *)arg))
2207 return -EFAULT;
2208
Sahitya Tummala60b2b4e2018-05-18 11:51:52 +05302209 if (in != F2FS_GOING_DOWN_FULLSYNC) {
2210 ret = mnt_want_write_file(filp);
Chao Yu86264412020-06-08 20:03:16 +08002211 if (ret) {
2212 if (ret == -EROFS) {
2213 ret = 0;
2214 f2fs_stop_checkpoint(sbi, false);
2215 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2216 trace_f2fs_shutdown(sbi, in, ret);
2217 }
Sahitya Tummala60b2b4e2018-05-18 11:51:52 +05302218 return ret;
Chao Yu86264412020-06-08 20:03:16 +08002219 }
Sahitya Tummala60b2b4e2018-05-18 11:51:52 +05302220 }
Chao Yu7fb17fe2016-05-09 19:56:32 +08002221
Jaegeuk Kim1abff932015-01-08 19:15:53 -08002222 switch (in) {
2223 case F2FS_GOING_DOWN_FULLSYNC:
Christoph Hellwig040f04b2020-11-24 11:54:06 +01002224 ret = freeze_bdev(sb->s_bdev);
2225 if (ret)
Chao Yud027c482018-01-17 22:28:52 +08002226 goto out;
Christoph Hellwig040f04b2020-11-24 11:54:06 +01002227 f2fs_stop_checkpoint(sbi, false);
2228 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2229 thaw_bdev(sb->s_bdev);
Jaegeuk Kim1abff932015-01-08 19:15:53 -08002230 break;
2231 case F2FS_GOING_DOWN_METASYNC:
2232 /* do checkpoint only */
Chao Yud027c482018-01-17 22:28:52 +08002233 ret = f2fs_sync_fs(sb, 1);
2234 if (ret)
2235 goto out;
Jaegeuk Kim38f91ca2016-05-18 14:07:56 -07002236 f2fs_stop_checkpoint(sbi, false);
Jaegeuk Kim83a3bfd2018-06-21 13:46:23 -07002237 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
Jaegeuk Kim1abff932015-01-08 19:15:53 -08002238 break;
2239 case F2FS_GOING_DOWN_NOSYNC:
Jaegeuk Kim38f91ca2016-05-18 14:07:56 -07002240 f2fs_stop_checkpoint(sbi, false);
Jaegeuk Kim83a3bfd2018-06-21 13:46:23 -07002241 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
Jaegeuk Kim1abff932015-01-08 19:15:53 -08002242 break;
Jaegeuk Kimc912a822015-10-07 09:46:37 -07002243 case F2FS_GOING_DOWN_METAFLUSH:
Chao Yu4d57b862018-05-30 00:20:41 +08002244 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
Jaegeuk Kim38f91ca2016-05-18 14:07:56 -07002245 f2fs_stop_checkpoint(sbi, false);
Jaegeuk Kim83a3bfd2018-06-21 13:46:23 -07002246 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
Jaegeuk Kimc912a822015-10-07 09:46:37 -07002247 break;
Jaegeuk Kim0cd6d9b2018-11-28 13:26:03 -08002248 case F2FS_GOING_DOWN_NEED_FSCK:
2249 set_sbi_flag(sbi, SBI_NEED_FSCK);
Jaegeuk Kimdb610a62019-01-24 17:48:38 -08002250 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2251 set_sbi_flag(sbi, SBI_IS_DIRTY);
Jaegeuk Kim0cd6d9b2018-11-28 13:26:03 -08002252 /* do checkpoint only */
2253 ret = f2fs_sync_fs(sb, 1);
Jaegeuk Kimdb610a62019-01-24 17:48:38 -08002254 goto out;
Jaegeuk Kim1abff932015-01-08 19:15:53 -08002255 default:
Chao Yu7fb17fe2016-05-09 19:56:32 +08002256 ret = -EINVAL;
2257 goto out;
Jaegeuk Kim1abff932015-01-08 19:15:53 -08002258 }
Chao Yu7950e9a2018-01-18 17:23:29 +08002259
Chao Yu4d57b862018-05-30 00:20:41 +08002260 f2fs_stop_gc_thread(sbi);
2261 f2fs_stop_discard_thread(sbi);
Chao Yu7950e9a2018-01-18 17:23:29 +08002262
Chao Yu4d57b862018-05-30 00:20:41 +08002263 f2fs_drop_discard_cmd(sbi);
Chao Yu7950e9a2018-01-18 17:23:29 +08002264 clear_opt(sbi, DISCARD);
2265
Jaegeuk Kimd0239e12016-01-08 16:57:48 -08002266 f2fs_update_time(sbi, REQ_TIME);
Chao Yu7fb17fe2016-05-09 19:56:32 +08002267out:
Sahitya Tummala60b2b4e2018-05-18 11:51:52 +05302268 if (in != F2FS_GOING_DOWN_FULLSYNC)
2269 mnt_drop_write_file(filp);
Chao Yu559e87c2019-02-26 19:01:15 +08002270
2271 trace_f2fs_shutdown(sbi, in, ret);
2272
Chao Yu7fb17fe2016-05-09 19:56:32 +08002273 return ret;
Jaegeuk Kim1abff932015-01-08 19:15:53 -08002274}
2275
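/*
 * FITRIM: clamp the requested minimum extent length to the device's
 * discard granularity, trim the filesystem, and copy the resulting
 * range (including the number of bytes trimmed) back to userspace.
 */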
Jaegeuk Kim52656e62014-09-24 15:37:02 -07002276static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2277{
2278 struct inode *inode = file_inode(filp);
2279 struct super_block *sb = inode->i_sb;
2280 struct request_queue *q = bdev_get_queue(sb->s_bdev);
2281 struct fstrim_range range;
2282 int ret;
2283
2284 if (!capable(CAP_SYS_ADMIN))
2285 return -EPERM;
2286
Chao Yu7d20c8a2018-09-04 03:52:17 +08002287 if (!f2fs_hw_support_discard(F2FS_SB(sb)))
Jaegeuk Kim52656e62014-09-24 15:37:02 -07002288 return -EOPNOTSUPP;
2289
2290 if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2291 sizeof(range)))
2292 return -EFAULT;
2293
Chao Yu7fb17fe2016-05-09 19:56:32 +08002294 ret = mnt_want_write_file(filp);
2295 if (ret)
2296 return ret;
2297
Jaegeuk Kim52656e62014-09-24 15:37:02 -07002298 range.minlen = max((unsigned int)range.minlen,
2299 q->limits.discard_granularity);
2300 ret = f2fs_trim_fs(F2FS_SB(sb), &range);
Chao Yu7fb17fe2016-05-09 19:56:32 +08002301 mnt_drop_write_file(filp);
Jaegeuk Kim52656e62014-09-24 15:37:02 -07002302 if (ret < 0)
2303 return ret;
2304
2305 if (copy_to_user((struct fstrim_range __user *)arg, &range,
2306 sizeof(range)))
2307 return -EFAULT;
Jaegeuk Kimd0239e12016-01-08 16:57:48 -08002308 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
Jaegeuk Kim52656e62014-09-24 15:37:02 -07002309 return 0;
2310}
2311
Jaegeuk Kimf424f662015-04-20 15:19:06 -07002312static bool uuid_is_nonzero(__u8 u[16])
2313{
2314 int i;
2315
2316 for (i = 0; i < 16; i++)
2317 if (u[i])
2318 return true;
2319 return false;
2320}
2321
2322static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2323{
Jaegeuk Kimf424f662015-04-20 15:19:06 -07002324 struct inode *inode = file_inode(filp);
2325
Chao Yu7beb01f2018-10-24 18:34:26 +08002326 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
Chao Yuead710b2017-11-14 19:28:42 +08002327 return -EOPNOTSUPP;
2328
Jaegeuk Kimd0239e12016-01-08 16:57:48 -08002329 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
Chao Yu7fb17fe2016-05-09 19:56:32 +08002330
Eric Biggersdb717d82016-11-26 19:07:49 -05002331 return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
Jaegeuk Kimf424f662015-04-20 15:19:06 -07002332}
2333
2334static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2335{
Chao Yu7beb01f2018-10-24 18:34:26 +08002336 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
Chao Yuead710b2017-11-14 19:28:42 +08002337 return -EOPNOTSUPP;
Eric Biggersdb717d82016-11-26 19:07:49 -05002338 return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
Jaegeuk Kimf424f662015-04-20 15:19:06 -07002339}
2340
2341static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2342{
2343 struct inode *inode = file_inode(filp);
2344 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2345 int err;
2346
Chao Yu7beb01f2018-10-24 18:34:26 +08002347 if (!f2fs_sb_has_encrypt(sbi))
Jaegeuk Kimf424f662015-04-20 15:19:06 -07002348 return -EOPNOTSUPP;
2349
Jaegeuk Kimf424f662015-04-20 15:19:06 -07002350 err = mnt_want_write_file(filp);
2351 if (err)
2352 return err;
2353
Chao Yu846ae672018-02-26 22:04:13 +08002354 down_write(&sbi->sb_lock);
Chao Yud0d3f1b2018-02-11 22:53:20 +08002355
2356 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2357 goto got_it;
2358
Jaegeuk Kimf424f662015-04-20 15:19:06 -07002359 /* update superblock with uuid */
2360 generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2361
Chao Yuc5bda1c2015-06-08 13:28:03 +08002362 err = f2fs_commit_super(sbi, false);
Jaegeuk Kimf424f662015-04-20 15:19:06 -07002363 if (err) {
2364 /* undo new data */
2365 memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
Chao Yud0d3f1b2018-02-11 22:53:20 +08002366 goto out_err;
Jaegeuk Kimf424f662015-04-20 15:19:06 -07002367 }
2368got_it:
2369 if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2370 16))
Chao Yud0d3f1b2018-02-11 22:53:20 +08002371 err = -EFAULT;
2372out_err:
Chao Yu846ae672018-02-26 22:04:13 +08002373 up_write(&sbi->sb_lock);
Chao Yud0d3f1b2018-02-11 22:53:20 +08002374 mnt_drop_write_file(filp);
2375 return err;
Jaegeuk Kimf424f662015-04-20 15:19:06 -07002376}
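/*
 * Editorial addition, not in the original source: a hedged usage sketch.
 * F2FS_IOC_GET_ENCRYPTION_PWSALT fills a caller-supplied 16-byte buffer;
 * as the handler above shows, a random salt is generated and committed
 * to the superblock on first use.
 *
 *	__u8 salt[16];
 *	ioctl(fd, F2FS_IOC_GET_ENCRYPTION_PWSALT, salt);
 */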
2377
Eric Biggers8ce589c2019-08-04 19:35:48 -07002378static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2379 unsigned long arg)
2380{
2381 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2382 return -EOPNOTSUPP;
2383
2384 return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2385}
2386
2387static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2388{
2389 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2390 return -EOPNOTSUPP;
2391
2392 return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2393}
2394
2395static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2396{
2397 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2398 return -EOPNOTSUPP;
2399
2400 return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2401}
2402
2403static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2404 unsigned long arg)
2405{
2406 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2407 return -EOPNOTSUPP;
2408
2409 return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2410}
2411
2412static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2413 unsigned long arg)
2414{
2415 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2416 return -EOPNOTSUPP;
2417
2418 return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2419}
2420
Eric Biggersee446e12020-03-14 13:50:51 -07002421static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2422{
2423 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2424 return -EOPNOTSUPP;
2425
2426 return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2427}
2428
Chao Yuc1c1b582015-07-10 18:08:10 +08002429static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2430{
2431 struct inode *inode = file_inode(filp);
2432 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Chao Yud530d4d2015-10-05 22:22:44 +08002433 __u32 sync;
Chao Yu7fb17fe2016-05-09 19:56:32 +08002434 int ret;
Chao Yuc1c1b582015-07-10 18:08:10 +08002435
2436 if (!capable(CAP_SYS_ADMIN))
2437 return -EPERM;
2438
Chao Yud530d4d2015-10-05 22:22:44 +08002439 if (get_user(sync, (__u32 __user *)arg))
Chao Yuc1c1b582015-07-10 18:08:10 +08002440 return -EFAULT;
2441
Chao Yud530d4d2015-10-05 22:22:44 +08002442 if (f2fs_readonly(sbi->sb))
2443 return -EROFS;
Chao Yuc1c1b582015-07-10 18:08:10 +08002444
Chao Yu7fb17fe2016-05-09 19:56:32 +08002445 ret = mnt_want_write_file(filp);
2446 if (ret)
2447 return ret;
2448
Chao Yud530d4d2015-10-05 22:22:44 +08002449 if (!sync) {
Chao Yufb24fea2020-01-14 19:36:50 +08002450 if (!down_write_trylock(&sbi->gc_lock)) {
Chao Yu7fb17fe2016-05-09 19:56:32 +08002451 ret = -EBUSY;
2452 goto out;
2453 }
Chao Yud530d4d2015-10-05 22:22:44 +08002454 } else {
Chao Yufb24fea2020-01-14 19:36:50 +08002455 down_write(&sbi->gc_lock);
Chao Yuc1c1b582015-07-10 18:08:10 +08002456 }
2457
Chao Yu7dede8862021-02-20 17:35:40 +08002458 ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
Chao Yu7fb17fe2016-05-09 19:56:32 +08002459out:
2460 mnt_drop_write_file(filp);
2461 return ret;
Chao Yuc1c1b582015-07-10 18:08:10 +08002462}
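/*
 * Editorial addition, not in the original source: a hedged usage sketch.
 * The argument to F2FS_IOC_GARBAGE_COLLECT is a single __u32 sync flag:
 *
 *	__u32 sync = 0;
 *	ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync);
 *
 * With sync == 0 the handler only trylocks gc_lock and returns -EBUSY if
 * GC is already running; with sync != 0 it blocks until the lock is held.
 */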
2463
Chao Yu34178b1b2020-11-10 09:24:37 +08002464static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
Jaegeuk Kim34dc77a2017-06-15 16:44:42 -07002465{
Chao Yu34178b1b2020-11-10 09:24:37 +08002466 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
Jaegeuk Kim34dc77a2017-06-15 16:44:42 -07002467 u64 end;
2468 int ret;
2469
2470 if (!capable(CAP_SYS_ADMIN))
2471 return -EPERM;
Jaegeuk Kim34dc77a2017-06-15 16:44:42 -07002472 if (f2fs_readonly(sbi->sb))
2473 return -EROFS;
2474
Chao Yu34178b1b2020-11-10 09:24:37 +08002475 end = range->start + range->len;
2476 if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
Sahitya Tummalafbbf7792019-09-17 10:19:23 +05302477 end >= MAX_BLKADDR(sbi))
Yunlei Heb82f6e32018-04-24 11:40:30 +08002478 return -EINVAL;
Yunlei Heb82f6e32018-04-24 11:40:30 +08002479
Jaegeuk Kim34dc77a2017-06-15 16:44:42 -07002480 ret = mnt_want_write_file(filp);
2481 if (ret)
2482 return ret;
2483
Jaegeuk Kim34dc77a2017-06-15 16:44:42 -07002484do_more:
Chao Yu34178b1b2020-11-10 09:24:37 +08002485 if (!range->sync) {
Chao Yufb24fea2020-01-14 19:36:50 +08002486 if (!down_write_trylock(&sbi->gc_lock)) {
Jaegeuk Kim34dc77a2017-06-15 16:44:42 -07002487 ret = -EBUSY;
2488 goto out;
2489 }
2490 } else {
Chao Yufb24fea2020-01-14 19:36:50 +08002491 down_write(&sbi->gc_lock);
Jaegeuk Kim34dc77a2017-06-15 16:44:42 -07002492 }
2493
Chao Yu7dede8862021-02-20 17:35:40 +08002494 ret = f2fs_gc(sbi, range->sync, true, false,
2495 GET_SEGNO(sbi, range->start));
Qilong Zhang97767502020-06-28 19:23:03 +08002496 if (ret) {
2497 if (ret == -EBUSY)
2498 ret = -EAGAIN;
2499 goto out;
2500 }
Chao Yu34178b1b2020-11-10 09:24:37 +08002501 range->start += BLKS_PER_SEC(sbi);
2502 if (range->start <= end)
Jaegeuk Kim34dc77a2017-06-15 16:44:42 -07002503 goto do_more;
2504out:
2505 mnt_drop_write_file(filp);
2506 return ret;
2507}
2508
Chao Yu34178b1b2020-11-10 09:24:37 +08002509static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2510{
2511 struct f2fs_gc_range range;
2512
2513 if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2514 sizeof(range)))
2515 return -EFAULT;
2516 return __f2fs_ioc_gc_range(filp, &range);
2517}
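/*
 * Editorial addition, not in the original source: a hedged usage sketch.
 * The start/len fields of struct f2fs_gc_range are block addresses within
 * the main area (validated against MAIN_BLKADDR/MAX_BLKADDR above), not
 * file offsets; blk and nblks below are placeholders:
 *
 *	struct f2fs_gc_range gr = { .sync = 1, .start = blk, .len = nblks };
 *	ioctl(fd, F2FS_IOC_GARBAGE_COLLECT_RANGE, &gr);
 */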
2518
Chao Yu059c0642018-07-17 20:41:49 +08002519static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
Chao Yu456b88e2015-10-05 22:24:19 +08002520{
2521 struct inode *inode = file_inode(filp);
2522 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Chao Yu7fb17fe2016-05-09 19:56:32 +08002523 int ret;
Chao Yu456b88e2015-10-05 22:24:19 +08002524
2525 if (!capable(CAP_SYS_ADMIN))
2526 return -EPERM;
2527
2528 if (f2fs_readonly(sbi->sb))
2529 return -EROFS;
2530
Daniel Rosenberg43549942018-08-20 19:21:43 -07002531 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
Joe Perchesdcbb4c12019-06-18 17:48:42 +08002532 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
Daniel Rosenberg43549942018-08-20 19:21:43 -07002533 return -EINVAL;
2534 }
2535
Chao Yu7fb17fe2016-05-09 19:56:32 +08002536 ret = mnt_want_write_file(filp);
2537 if (ret)
2538 return ret;
2539
2540 ret = f2fs_sync_fs(sbi->sb, 1);
2541
2542 mnt_drop_write_file(filp);
2543 return ret;
Chao Yu456b88e2015-10-05 22:24:19 +08002544}
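/*
 * Editorial addition, not in the original source: a hedged usage sketch.
 * F2FS_IOC_WRITE_CHECKPOINT carries no argument payload; it simply forces
 * a synchronous checkpoint via f2fs_sync_fs(sb, 1) as seen above:
 *
 *	ioctl(fd, F2FS_IOC_WRITE_CHECKPOINT, 0);
 */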
2545
Chao Yud323d002015-10-27 09:53:45 +08002546static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2547 struct file *filp,
2548 struct f2fs_defragment *range)
2549{
2550 struct inode *inode = file_inode(filp);
Chao Yuf3d98e72018-01-10 18:18:52 +08002551 struct f2fs_map_blocks map = { .m_next_extent = NULL,
Yi Zhuang5f029c02021-04-06 09:47:35 +08002552 .m_seg_type = NO_CHECK_TYPE,
Jia Zhuf4f0b672018-11-20 04:29:35 +08002553 .m_may_create = false };
youngjun yoo1061fd42018-05-30 04:34:58 +09002554 struct extent_info ei = {0, 0, 0};
Chao Yuf3d98e72018-01-10 18:18:52 +08002555 pgoff_t pg_start, pg_end, next_pgofs;
Chao Yu3519e3f2015-12-01 11:56:52 +08002556 unsigned int blk_per_seg = sbi->blocks_per_seg;
Chao Yud323d002015-10-27 09:53:45 +08002557 unsigned int total = 0, sec_num;
Chao Yud323d002015-10-27 09:53:45 +08002558 block_t blk_end = 0;
2559 bool fragmented = false;
2560 int err;
2561
2562 /* if in-place-update policy is enabled, don't waste time here */
Chao Yu4d57b862018-05-30 00:20:41 +08002563 if (f2fs_should_update_inplace(inode, NULL))
Chao Yud323d002015-10-27 09:53:45 +08002564 return -EINVAL;
2565
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002566 pg_start = range->start >> PAGE_SHIFT;
2567 pg_end = (range->start + range->len) >> PAGE_SHIFT;
Chao Yud323d002015-10-27 09:53:45 +08002568
Jaegeuk Kim2c4db1a2016-01-07 14:15:04 -08002569 f2fs_balance_fs(sbi, true);
Chao Yud323d002015-10-27 09:53:45 +08002570
Al Viro59551022016-01-22 15:40:57 -05002571 inode_lock(inode);
Chao Yud323d002015-10-27 09:53:45 +08002572
2573 /* writeback all dirty pages in the range */
2574 err = filemap_write_and_wait_range(inode->i_mapping, range->start,
Fan Lid8fe4f02015-12-14 13:34:00 +08002575 range->start + range->len - 1);
Chao Yud323d002015-10-27 09:53:45 +08002576 if (err)
2577 goto out;
2578
2579 /*
2580 * look up mapping info in the extent cache; skip defragmenting if the
2581 * physical block addresses are already contiguous.
2582 */
2583 if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2584 if (ei.fofs + ei.len >= pg_end)
2585 goto out;
2586 }
2587
2588 map.m_lblk = pg_start;
Chao Yuf3d98e72018-01-10 18:18:52 +08002589 map.m_next_pgofs = &next_pgofs;
Chao Yud323d002015-10-27 09:53:45 +08002590
2591 /*
2592 * look up mapping info in the dnode page cache; skip defragmenting if
2593 * all physical block addresses are contiguous, even if there are
2594 * hole(s) in the logical blocks.
2595 */
2596 while (map.m_lblk < pg_end) {
Fan Lia1c1e9b2015-12-15 17:02:41 +08002597 map.m_len = pg_end - map.m_lblk;
Qiuyang Sunf2220c72017-08-09 17:27:30 +08002598 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
Chao Yud323d002015-10-27 09:53:45 +08002599 if (err)
2600 goto out;
2601
2602 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
Chao Yuf3d98e72018-01-10 18:18:52 +08002603 map.m_lblk = next_pgofs;
Chao Yud323d002015-10-27 09:53:45 +08002604 continue;
2605 }
2606
Chao Yu25a912e2018-01-10 18:18:51 +08002607 if (blk_end && blk_end != map.m_pblk)
Chao Yud323d002015-10-27 09:53:45 +08002608 fragmented = true;
Chao Yu25a912e2018-01-10 18:18:51 +08002609
2610 /* record the total count of blocks that we're going to move */
2611 total += map.m_len;
2612
Chao Yud323d002015-10-27 09:53:45 +08002613 blk_end = map.m_pblk + map.m_len;
2614
2615 map.m_lblk += map.m_len;
Chao Yud323d002015-10-27 09:53:45 +08002616 }
2617
Chao Yud3a1a0e2019-07-29 23:02:29 +08002618 if (!fragmented) {
2619 total = 0;
Chao Yud323d002015-10-27 09:53:45 +08002620 goto out;
Chao Yud3a1a0e2019-07-29 23:02:29 +08002621 }
Chao Yud323d002015-10-27 09:53:45 +08002622
Geert Uytterhoevenf91108b2019-06-20 16:42:08 +02002623 sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
Chao Yud323d002015-10-27 09:53:45 +08002624
2625 /*
2626 * make sure there are enough free sections for LFS allocation; this
2627 * avoids running defragmentation in SSR mode while free sections are
2628 * being allocated intensively
2629 */
Jaegeuk Kim7f3037a2016-09-01 12:02:51 -07002630 if (has_not_enough_free_secs(sbi, 0, sec_num)) {
Chao Yud323d002015-10-27 09:53:45 +08002631 err = -EAGAIN;
2632 goto out;
2633 }
2634
Chao Yu25a912e2018-01-10 18:18:51 +08002635 map.m_lblk = pg_start;
2636 map.m_len = pg_end - pg_start;
2637 total = 0;
2638
Chao Yud323d002015-10-27 09:53:45 +08002639 while (map.m_lblk < pg_end) {
2640 pgoff_t idx;
2641 int cnt = 0;
2642
2643do_map:
Fan Lia1c1e9b2015-12-15 17:02:41 +08002644 map.m_len = pg_end - map.m_lblk;
Qiuyang Sunf2220c72017-08-09 17:27:30 +08002645 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
Chao Yud323d002015-10-27 09:53:45 +08002646 if (err)
2647 goto clear_out;
2648
2649 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
Chao Yuf3d98e72018-01-10 18:18:52 +08002650 map.m_lblk = next_pgofs;
Chao Yud3a1a0e2019-07-29 23:02:29 +08002651 goto check;
Chao Yud323d002015-10-27 09:53:45 +08002652 }
2653
Jaegeuk Kim91942322016-05-20 10:13:22 -07002654 set_inode_flag(inode, FI_DO_DEFRAG);
Chao Yud323d002015-10-27 09:53:45 +08002655
2656 idx = map.m_lblk;
2657 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2658 struct page *page;
2659
Chao Yu4d57b862018-05-30 00:20:41 +08002660 page = f2fs_get_lock_data_page(inode, idx, true);
Chao Yud323d002015-10-27 09:53:45 +08002661 if (IS_ERR(page)) {
2662 err = PTR_ERR(page);
2663 goto clear_out;
2664 }
2665
2666 set_page_dirty(page);
2667 f2fs_put_page(page, 1);
2668
2669 idx++;
2670 cnt++;
2671 total++;
2672 }
2673
2674 map.m_lblk = idx;
Chao Yud3a1a0e2019-07-29 23:02:29 +08002675check:
2676 if (map.m_lblk < pg_end && cnt < blk_per_seg)
Chao Yud323d002015-10-27 09:53:45 +08002677 goto do_map;
2678
Jaegeuk Kim91942322016-05-20 10:13:22 -07002679 clear_inode_flag(inode, FI_DO_DEFRAG);
Chao Yud323d002015-10-27 09:53:45 +08002680
2681 err = filemap_fdatawrite(inode->i_mapping);
2682 if (err)
2683 goto out;
2684 }
2685clear_out:
Jaegeuk Kim91942322016-05-20 10:13:22 -07002686 clear_inode_flag(inode, FI_DO_DEFRAG);
Chao Yud323d002015-10-27 09:53:45 +08002687out:
Al Viro59551022016-01-22 15:40:57 -05002688 inode_unlock(inode);
Chao Yud323d002015-10-27 09:53:45 +08002689 if (!err)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002690 range->len = (u64)total << PAGE_SHIFT;
Chao Yud323d002015-10-27 09:53:45 +08002691 return err;
2692}
2693
2694static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2695{
2696 struct inode *inode = file_inode(filp);
2697 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2698 struct f2fs_defragment range;
2699 int err;
2700
2701 if (!capable(CAP_SYS_ADMIN))
2702 return -EPERM;
2703
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002704 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
Chao Yud323d002015-10-27 09:53:45 +08002705 return -EINVAL;
2706
Kinglong Meed7563862017-03-10 17:55:07 +08002707 if (f2fs_readonly(sbi->sb))
2708 return -EROFS;
2709
2710 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2711 sizeof(range)))
2712 return -EFAULT;
2713
2714 /* verify alignment of offset & size */
2715 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2716 return -EINVAL;
2717
2718 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
Chengguang Xu6d1451b2021-01-13 13:21:54 +08002719 max_file_blocks(inode)))
Kinglong Meed7563862017-03-10 17:55:07 +08002720 return -EINVAL;
2721
Chao Yud323d002015-10-27 09:53:45 +08002722 err = mnt_want_write_file(filp);
2723 if (err)
2724 return err;
2725
Chao Yud323d002015-10-27 09:53:45 +08002726 err = f2fs_defragment_range(sbi, filp, &range);
Kinglong Meed7563862017-03-10 17:55:07 +08002727 mnt_drop_write_file(filp);
2728
Jaegeuk Kimd0239e12016-01-08 16:57:48 -08002729 f2fs_update_time(sbi, REQ_TIME);
Chao Yud323d002015-10-27 09:53:45 +08002730 if (err < 0)
Kinglong Meed7563862017-03-10 17:55:07 +08002731 return err;
Chao Yud323d002015-10-27 09:53:45 +08002732
2733 if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2734 sizeof(range)))
Kinglong Meed7563862017-03-10 17:55:07 +08002735 return -EFAULT;
2736
2737 return 0;
Chao Yud323d002015-10-27 09:53:45 +08002738}
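/*
 * Editorial addition, not in the original source: a hedged usage sketch.
 * start and len in struct f2fs_defragment are byte offsets and must be
 * F2FS_BLKSIZE-aligned, per the checks above; length_in_bytes is a
 * placeholder:
 *
 *	struct f2fs_defragment df = { .start = 0, .len = length_in_bytes };
 *	ioctl(fd, F2FS_IOC_DEFRAGMENT, &df);
 *
 * On return, df.len is overwritten with the number of bytes actually
 * marked dirty for relocation.
 */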
2739
Jaegeuk Kim4dd6f972016-07-08 15:16:47 -07002740static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2741 struct file *file_out, loff_t pos_out, size_t len)
2742{
2743 struct inode *src = file_inode(file_in);
2744 struct inode *dst = file_inode(file_out);
2745 struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2746 size_t olen = len, dst_max_i_size = 0;
2747 size_t dst_osize;
2748 int ret;
2749
2750 if (file_in->f_path.mnt != file_out->f_path.mnt ||
2751 src->i_sb != dst->i_sb)
2752 return -EXDEV;
2753
2754 if (unlikely(f2fs_readonly(src->i_sb)))
2755 return -EROFS;
2756
Chao Yufe8494b2016-08-04 20:13:02 +08002757 if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2758 return -EINVAL;
Jaegeuk Kim4dd6f972016-07-08 15:16:47 -07002759
Chandan Rajendra62230e0d2018-12-12 15:20:11 +05302760 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
Jaegeuk Kim4dd6f972016-07-08 15:16:47 -07002761 return -EOPNOTSUPP;
2762
Dan Robertsonaad13832020-08-30 21:45:23 +00002763 if (pos_out < 0 || pos_in < 0)
2764 return -EINVAL;
2765
Fan Lid95fd912016-09-13 11:35:42 +08002766 if (src == dst) {
2767 if (pos_in == pos_out)
2768 return 0;
2769 if (pos_out > pos_in && pos_out < pos_in + len)
2770 return -EINVAL;
2771 }
2772
Jaegeuk Kim4dd6f972016-07-08 15:16:47 -07002773 inode_lock(src);
Chao Yu20a3d612016-08-04 20:13:03 +08002774 if (src != dst) {
Chao Yubb066642017-11-03 10:21:05 +08002775 ret = -EBUSY;
2776 if (!inode_trylock(dst))
2777 goto out;
Chao Yu20a3d612016-08-04 20:13:03 +08002778 }
Jaegeuk Kim4dd6f972016-07-08 15:16:47 -07002779
2780 ret = -EINVAL;
2781 if (pos_in + len > src->i_size || pos_in + len < pos_in)
2782 goto out_unlock;
2783 if (len == 0)
2784 olen = len = src->i_size - pos_in;
2785 if (pos_in + len == src->i_size)
2786 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2787 if (len == 0) {
2788 ret = 0;
2789 goto out_unlock;
2790 }
2791
2792 dst_osize = dst->i_size;
2793 if (pos_out + olen > dst->i_size)
2794 dst_max_i_size = pos_out + olen;
2795
2796 /* verify the end result is block aligned */
2797 if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2798 !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2799 !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2800 goto out_unlock;
2801
2802 ret = f2fs_convert_inline_inode(src);
2803 if (ret)
2804 goto out_unlock;
2805
2806 ret = f2fs_convert_inline_inode(dst);
2807 if (ret)
2808 goto out_unlock;
2809
2810 /* write out all dirty pages from offset */
2811 ret = filemap_write_and_wait_range(src->i_mapping,
2812 pos_in, pos_in + len);
2813 if (ret)
2814 goto out_unlock;
2815
2816 ret = filemap_write_and_wait_range(dst->i_mapping,
2817 pos_out, pos_out + len);
2818 if (ret)
2819 goto out_unlock;
2820
2821 f2fs_balance_fs(sbi, true);
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09002822
2823 down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2824 if (src != dst) {
2825 ret = -EBUSY;
2826 if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2827 goto out_src;
2828 }
2829
Jaegeuk Kim4dd6f972016-07-08 15:16:47 -07002830 f2fs_lock_op(sbi);
Fan Li61e4da12016-09-10 11:19:37 +08002831 ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2832 pos_out >> F2FS_BLKSIZE_BITS,
2833 len >> F2FS_BLKSIZE_BITS, false);
Jaegeuk Kim4dd6f972016-07-08 15:16:47 -07002834
2835 if (!ret) {
2836 if (dst_max_i_size)
2837 f2fs_i_size_write(dst, dst_max_i_size);
2838 else if (dst_osize != dst->i_size)
2839 f2fs_i_size_write(dst, dst_osize);
2840 }
2841 f2fs_unlock_op(sbi);
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09002842
2843 if (src != dst)
Chao Yub2532c62018-04-24 10:55:28 +08002844 up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09002845out_src:
Chao Yub2532c62018-04-24 10:55:28 +08002846 up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09002847out_unlock:
2848 if (src != dst)
2849 inode_unlock(dst);
2850out:
Jaegeuk Kim4dd6f972016-07-08 15:16:47 -07002851 inode_unlock(src);
2852 return ret;
2853}
2854
Chao Yu34178b1b2020-11-10 09:24:37 +08002855static int __f2fs_ioc_move_range(struct file *filp,
2856 struct f2fs_move_range *range)
Jaegeuk Kim4dd6f972016-07-08 15:16:47 -07002857{
Jaegeuk Kim4dd6f972016-07-08 15:16:47 -07002858 struct fd dst;
2859 int err;
2860
2861 if (!(filp->f_mode & FMODE_READ) ||
2862 !(filp->f_mode & FMODE_WRITE))
2863 return -EBADF;
2864
Chao Yu34178b1b2020-11-10 09:24:37 +08002865 dst = fdget(range->dst_fd);
Jaegeuk Kim4dd6f972016-07-08 15:16:47 -07002866 if (!dst.file)
2867 return -EBADF;
2868
2869 if (!(dst.file->f_mode & FMODE_WRITE)) {
2870 err = -EBADF;
2871 goto err_out;
2872 }
2873
2874 err = mnt_want_write_file(filp);
2875 if (err)
2876 goto err_out;
2877
Chao Yu34178b1b2020-11-10 09:24:37 +08002878 err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2879 range->pos_out, range->len);
Jaegeuk Kim4dd6f972016-07-08 15:16:47 -07002880
2881 mnt_drop_write_file(filp);
Jaegeuk Kim4dd6f972016-07-08 15:16:47 -07002882err_out:
2883 fdput(dst);
2884 return err;
2885}
2886
Chao Yu34178b1b2020-11-10 09:24:37 +08002887static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2888{
2889 struct f2fs_move_range range;
2890
2891 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2892 sizeof(range)))
2893 return -EFAULT;
2894 return __f2fs_ioc_move_range(filp, &range);
2895}
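/*
 * Editorial addition, not in the original source: a hedged usage sketch.
 * Per the checks in __f2fs_ioc_move_range() and f2fs_move_file_range(),
 * the source fd must be readable and writable, the destination fd
 * writable, and the end positions block-aligned; src_fd, dst_fd and len
 * are placeholders:
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd = dst_fd, .pos_in = 0, .pos_out = 0, .len = len,
 *	};
 *	ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);
 */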
2896
Jaegeuk Kime066b832017-04-13 15:17:00 -07002897static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2898{
2899 struct inode *inode = file_inode(filp);
2900 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2901 struct sit_info *sm = SIT_I(sbi);
2902 unsigned int start_segno = 0, end_segno = 0;
2903 unsigned int dev_start_segno = 0, dev_end_segno = 0;
2904 struct f2fs_flush_device range;
2905 int ret;
2906
2907 if (!capable(CAP_SYS_ADMIN))
2908 return -EPERM;
2909
2910 if (f2fs_readonly(sbi->sb))
2911 return -EROFS;
2912
Daniel Rosenberg43549942018-08-20 19:21:43 -07002913 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2914 return -EINVAL;
2915
Jaegeuk Kime066b832017-04-13 15:17:00 -07002916 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2917 sizeof(range)))
2918 return -EFAULT;
2919
Damien Le Moal09168782019-03-16 09:13:06 +09002920 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
Chao Yu2c70c5e2018-10-24 18:37:26 +08002921 __is_large_section(sbi)) {
Joe Perchesdcbb4c12019-06-18 17:48:42 +08002922 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2923 range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
Jaegeuk Kime066b832017-04-13 15:17:00 -07002924 return -EINVAL;
2925 }
2926
2927 ret = mnt_want_write_file(filp);
2928 if (ret)
2929 return ret;
2930
2931 if (range.dev_num != 0)
2932 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2933 dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2934
2935 start_segno = sm->last_victim[FLUSH_DEVICE];
2936 if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2937 start_segno = dev_start_segno;
2938 end_segno = min(start_segno + range.segments, dev_end_segno);
2939
2940 while (start_segno < end_segno) {
Chao Yufb24fea2020-01-14 19:36:50 +08002941 if (!down_write_trylock(&sbi->gc_lock)) {
Jaegeuk Kime066b832017-04-13 15:17:00 -07002942 ret = -EBUSY;
2943 goto out;
2944 }
2945 sm->last_victim[GC_CB] = end_segno + 1;
2946 sm->last_victim[GC_GREEDY] = end_segno + 1;
2947 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
Chao Yu7dede8862021-02-20 17:35:40 +08002948 ret = f2fs_gc(sbi, true, true, true, start_segno);
Jaegeuk Kime066b832017-04-13 15:17:00 -07002949 if (ret == -EAGAIN)
2950 ret = 0;
2951 else if (ret < 0)
2952 break;
2953 start_segno++;
2954 }
2955out:
2956 mnt_drop_write_file(filp);
2957 return ret;
2958}
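/*
 * Editorial addition, not in the original source: a hedged usage sketch.
 * F2FS_IOC_FLUSH_DEVICE migrates up to 'segments' segments off one device
 * of a multi-device filesystem via foreground GC; the values below are
 * placeholders. The call is rejected on single-device or large-section
 * layouts, as checked above.
 *
 *	struct f2fs_flush_device fl = { .dev_num = 1, .segments = 512 };
 *	ioctl(fd, F2FS_IOC_FLUSH_DEVICE, &fl);
 */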
2959
Jaegeuk Kime65ef202017-07-21 12:58:59 -07002960static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2961{
2962 struct inode *inode = file_inode(filp);
2963 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2964
2965 /* Must report atomic write support so Android's SQLite can detect it. */
2966 sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2967
2968 return put_user(sb_feature, (u32 __user *)arg);
2969}
Jaegeuk Kime066b832017-04-13 15:17:00 -07002970
Chao Yu2c1d0302017-07-29 00:32:52 +08002971#ifdef CONFIG_QUOTA
Chao Yu78130812018-09-25 15:36:02 +08002972int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2973{
2974 struct dquot *transfer_to[MAXQUOTAS] = {};
2975 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2976 struct super_block *sb = sbi->sb;
2977 int err = 0;
2978
2979 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2980 if (!IS_ERR(transfer_to[PRJQUOTA])) {
2981 err = __dquot_transfer(inode, transfer_to);
2982 if (err)
2983 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2984 dqput(transfer_to[PRJQUOTA]);
2985 }
2986 return err;
2987}
2988
Miklos Szeredi9b1bb012021-04-07 14:36:43 +02002989static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
Chao Yu2c1d0302017-07-29 00:32:52 +08002990{
Chao Yu2c1d0302017-07-29 00:32:52 +08002991 struct f2fs_inode_info *fi = F2FS_I(inode);
2992 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Chao Yu2c1d0302017-07-29 00:32:52 +08002993 struct page *ipage;
2994 kprojid_t kprojid;
2995 int err;
2996
Chao Yu7beb01f2018-10-24 18:34:26 +08002997 if (!f2fs_sb_has_project_quota(sbi)) {
Chao Yu2c1d0302017-07-29 00:32:52 +08002998 if (projid != F2FS_DEF_PROJID)
2999 return -EOPNOTSUPP;
3000 else
3001 return 0;
3002 }
3003
3004 if (!f2fs_has_extra_attr(inode))
3005 return -EOPNOTSUPP;
3006
3007 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3008
3009 if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
3010 return 0;
3011
Chao Yu2c1d0302017-07-29 00:32:52 +08003012 err = -EPERM;
Chao Yu2c1d0302017-07-29 00:32:52 +08003013 /* Is it a quota file? Do not allow the user to mess with it */
3014 if (IS_NOQUOTA(inode))
Wang Shilongc8e92752018-09-11 08:54:21 +09003015 return err;
Chao Yu2c1d0302017-07-29 00:32:52 +08003016
Chao Yu4d57b862018-05-30 00:20:41 +08003017 ipage = f2fs_get_node_page(sbi, inode->i_ino);
Wang Shilongc8e92752018-09-11 08:54:21 +09003018 if (IS_ERR(ipage))
3019 return PTR_ERR(ipage);
Chao Yu2c1d0302017-07-29 00:32:52 +08003020
3021 if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
3022 i_projid)) {
3023 err = -EOVERFLOW;
3024 f2fs_put_page(ipage, 1);
Wang Shilongc8e92752018-09-11 08:54:21 +09003025 return err;
Chao Yu2c1d0302017-07-29 00:32:52 +08003026 }
3027 f2fs_put_page(ipage, 1);
3028
Chao Yu10a26872021-10-28 21:03:05 +08003029 err = f2fs_dquot_initialize(inode);
Chao Yuc22aecd2018-04-21 17:53:52 +08003030 if (err)
Wang Shilongc8e92752018-09-11 08:54:21 +09003031 return err;
Chao Yu2c1d0302017-07-29 00:32:52 +08003032
Chao Yu78130812018-09-25 15:36:02 +08003033 f2fs_lock_op(sbi);
3034 err = f2fs_transfer_project_quota(inode, kprojid);
3035 if (err)
3036 goto out_unlock;
Chao Yu2c1d0302017-07-29 00:32:52 +08003037
3038 F2FS_I(inode)->i_projid = kprojid;
3039 inode->i_ctime = current_time(inode);
Chao Yu2c1d0302017-07-29 00:32:52 +08003040 f2fs_mark_inode_dirty_sync(inode, true);
Chao Yu78130812018-09-25 15:36:02 +08003041out_unlock:
3042 f2fs_unlock_op(sbi);
Chao Yu2c1d0302017-07-29 00:32:52 +08003043 return err;
3044}
3045#else
Chao Yu78130812018-09-25 15:36:02 +08003046int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3047{
3048 return 0;
3049}
3050
Miklos Szeredi9b1bb012021-04-07 14:36:43 +02003051static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
Chao Yu2c1d0302017-07-29 00:32:52 +08003052{
3053 if (projid != F2FS_DEF_PROJID)
3054 return -EOPNOTSUPP;
3055 return 0;
3056}
3057#endif
3058
Miklos Szeredi9b1bb012021-04-07 14:36:43 +02003059int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
Eric Biggers36098552019-06-04 22:59:04 -07003060{
Miklos Szeredi9b1bb012021-04-07 14:36:43 +02003061 struct inode *inode = d_inode(dentry);
Eric Biggers6fc93c42019-07-01 13:26:29 -07003062 struct f2fs_inode_info *fi = F2FS_I(inode);
Miklos Szeredi9b1bb012021-04-07 14:36:43 +02003063 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
Eric Biggers6fc93c42019-07-01 13:26:29 -07003064
Miklos Szeredi9b1bb012021-04-07 14:36:43 +02003065 if (IS_ENCRYPTED(inode))
3066 fsflags |= FS_ENCRYPT_FL;
3067 if (IS_VERITY(inode))
3068 fsflags |= FS_VERITY_FL;
3069 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
3070 fsflags |= FS_INLINE_DATA_FL;
3071 if (is_inode_flag_set(inode, FI_PIN_FILE))
3072 fsflags |= FS_NOCOW_FL;
3073
3074 fileattr_fill_flags(fa, fsflags & F2FS_GETTABLE_FS_FL);
Eric Biggers6fc93c42019-07-01 13:26:29 -07003075
3076 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3077 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
Eric Biggers6fc93c42019-07-01 13:26:29 -07003078
Chao Yu2c1d0302017-07-29 00:32:52 +08003079 return 0;
3080}
3081
Miklos Szeredi9b1bb012021-04-07 14:36:43 +02003082int f2fs_fileattr_set(struct user_namespace *mnt_userns,
3083 struct dentry *dentry, struct fileattr *fa)
Chao Yu2c1d0302017-07-29 00:32:52 +08003084{
Miklos Szeredi9b1bb012021-04-07 14:36:43 +02003085 struct inode *inode = d_inode(dentry);
3086 u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
Eric Biggers36098552019-06-04 22:59:04 -07003087 u32 iflags;
Chao Yu2c1d0302017-07-29 00:32:52 +08003088 int err;
3089
Miklos Szeredi9b1bb012021-04-07 14:36:43 +02003090 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
3091 return -EIO;
3092 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
3093 return -ENOSPC;
3094 if (fsflags & ~F2FS_GETTABLE_FS_FL)
Chao Yu2c1d0302017-07-29 00:32:52 +08003095 return -EOPNOTSUPP;
Miklos Szeredi9b1bb012021-04-07 14:36:43 +02003096 fsflags &= F2FS_SETTABLE_FS_FL;
3097 if (!fa->flags_valid)
3098 mask &= FS_COMMON_FL;
Chao Yu2c1d0302017-07-29 00:32:52 +08003099
Miklos Szeredi9b1bb012021-04-07 14:36:43 +02003100 iflags = f2fs_fsflags_to_iflags(fsflags);
Eric Biggers36098552019-06-04 22:59:04 -07003101 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
Chao Yu2c1d0302017-07-29 00:32:52 +08003102 return -EOPNOTSUPP;
3103
Miklos Szeredi9b1bb012021-04-07 14:36:43 +02003104 err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask));
3105 if (!err)
3106 err = f2fs_ioc_setproject(inode, fa->fsx_projid);
Chao Yu2c1d0302017-07-29 00:32:52 +08003107
Wang Shilongc8e92752018-09-11 08:54:21 +09003108 return err;
Chao Yu2c1d0302017-07-29 00:32:52 +08003109}
Jaegeuk Kim52656e62014-09-24 15:37:02 -07003110
Jaegeuk Kim1ad71a22017-12-07 16:25:39 -08003111int f2fs_pin_file_control(struct inode *inode, bool inc)
3112{
3113 struct f2fs_inode_info *fi = F2FS_I(inode);
3114 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3115
3116 /* Use a normal file's i_gc_failures count as a risk signal for unpinning. */
3117 if (inc)
Chao Yu2ef79ec2018-05-07 20:28:54 +08003118 f2fs_i_gc_failures_write(inode,
3119 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
Jaegeuk Kim1ad71a22017-12-07 16:25:39 -08003120
Chao Yu2ef79ec2018-05-07 20:28:54 +08003121 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
Joe Perchesdcbb4c12019-06-18 17:48:42 +08003122 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3123 __func__, inode->i_ino,
3124 fi->i_gc_failures[GC_FAILURE_PIN]);
Jaegeuk Kim1ad71a22017-12-07 16:25:39 -08003125 clear_inode_flag(inode, FI_PIN_FILE);
3126 return -EAGAIN;
3127 }
3128 return 0;
3129}
3130
3131static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3132{
3133 struct inode *inode = file_inode(filp);
3134 __u32 pin;
3135 int ret = 0;
3136
Jaegeuk Kim1ad71a22017-12-07 16:25:39 -08003137 if (get_user(pin, (__u32 __user *)arg))
3138 return -EFAULT;
3139
3140 if (!S_ISREG(inode->i_mode))
3141 return -EINVAL;
3142
3143 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3144 return -EROFS;
3145
3146 ret = mnt_want_write_file(filp);
3147 if (ret)
3148 return ret;
3149
3150 inode_lock(inode);
3151
3152 if (!pin) {
3153 clear_inode_flag(inode, FI_PIN_FILE);
Chao Yu30933362018-07-28 18:37:58 +08003154 f2fs_i_gc_failures_write(inode, 0);
Jaegeuk Kim1ad71a22017-12-07 16:25:39 -08003155 goto done;
3156 }
3157
Jaegeuk Kim19bdba52021-12-09 10:25:43 -08003158 if (f2fs_should_update_outplace(inode, NULL)) {
3159 ret = -EINVAL;
3160 goto out;
3161 }
3162
Jaegeuk Kim1ad71a22017-12-07 16:25:39 -08003163 if (f2fs_pin_file_control(inode, false)) {
3164 ret = -EAGAIN;
3165 goto out;
3166 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003167
Jaegeuk Kim1ad71a22017-12-07 16:25:39 -08003168 ret = f2fs_convert_inline_inode(inode);
3169 if (ret)
3170 goto out;
3171
Daeho Jeong78134d02020-09-08 11:44:11 +09003172 if (!f2fs_disable_compressed_file(inode)) {
Chao Yu4c8ff702019-11-01 18:07:14 +08003173 ret = -EOPNOTSUPP;
3174 goto out;
3175 }
3176
Jaegeuk Kim1ad71a22017-12-07 16:25:39 -08003177 set_inode_flag(inode, FI_PIN_FILE);
Chao Yu2ef79ec2018-05-07 20:28:54 +08003178 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
Jaegeuk Kim1ad71a22017-12-07 16:25:39 -08003179done:
3180 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3181out:
3182 inode_unlock(inode);
3183 mnt_drop_write_file(filp);
3184 return ret;
3185}
3186
3187static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3188{
3189 struct inode *inode = file_inode(filp);
3190 __u32 pin = 0;
3191
3192 if (is_inode_flag_set(inode, FI_PIN_FILE))
Chao Yu2ef79ec2018-05-07 20:28:54 +08003193 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
Jaegeuk Kim1ad71a22017-12-07 16:25:39 -08003194 return put_user(pin, (u32 __user *)arg);
3195}
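/*
 * Editorial addition, not in the original source: a hedged usage sketch.
 * Pinning keeps a regular file's blocks from being migrated by GC; the
 * GET variant reports the pinned file's GC-failure count (0 if unpinned):
 *
 *	__u32 pin = 1;
 *	ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);
 *	ioctl(fd, F2FS_IOC_GET_PIN_FILE, &pin);
 */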
3196
Chao Yuc4020b22018-01-11 14:42:30 +08003197int f2fs_precache_extents(struct inode *inode)
3198{
3199 struct f2fs_inode_info *fi = F2FS_I(inode);
3200 struct f2fs_map_blocks map;
3201 pgoff_t m_next_extent;
3202 loff_t end;
3203 int err;
3204
3205 if (is_inode_flag_set(inode, FI_NO_EXTENT))
3206 return -EOPNOTSUPP;
3207
3208 map.m_lblk = 0;
3209 map.m_next_pgofs = NULL;
3210 map.m_next_extent = &m_next_extent;
3211 map.m_seg_type = NO_CHECK_TYPE;
Jia Zhuf4f0b672018-11-20 04:29:35 +08003212 map.m_may_create = false;
Chengguang Xu6d1451b2021-01-13 13:21:54 +08003213 end = max_file_blocks(inode);
Chao Yuc4020b22018-01-11 14:42:30 +08003214
3215 while (map.m_lblk < end) {
3216 map.m_len = end - map.m_lblk;
3217
Chao Yub2532c62018-04-24 10:55:28 +08003218 down_write(&fi->i_gc_rwsem[WRITE]);
Chao Yuc4020b22018-01-11 14:42:30 +08003219 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
Chao Yub2532c62018-04-24 10:55:28 +08003220 up_write(&fi->i_gc_rwsem[WRITE]);
Chao Yuc4020b22018-01-11 14:42:30 +08003221 if (err)
3222 return err;
3223
3224 map.m_lblk = m_next_extent;
3225 }
3226
Tom Rix4f55dc22021-05-15 11:09:41 -07003227 return 0;
Chao Yuc4020b22018-01-11 14:42:30 +08003228}
3229
3230static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3231{
3232 return f2fs_precache_extents(file_inode(filp));
3233}
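/*
 * Editorial addition, not in the original source: a hedged usage sketch.
 * F2FS_IOC_PRECACHE_EXTENTS takes no argument; the handler walks the
 * whole file with F2FS_GET_BLOCK_PRECACHE to populate the extent cache:
 *
 *	ioctl(fd, F2FS_IOC_PRECACHE_EXTENTS, 0);
 */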
3234
Qiuyang Sun04f0b2e2019-06-05 11:33:25 +08003235static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3236{
3237 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3238 __u64 block_count;
Qiuyang Sun04f0b2e2019-06-05 11:33:25 +08003239
3240 if (!capable(CAP_SYS_ADMIN))
3241 return -EPERM;
3242
3243 if (f2fs_readonly(sbi->sb))
3244 return -EROFS;
3245
3246 if (copy_from_user(&block_count, (void __user *)arg,
3247 sizeof(block_count)))
3248 return -EFAULT;
3249
Jaegeuk Kimb4b10062020-03-31 11:43:07 -07003250 return f2fs_resize_fs(sbi, block_count);
Qiuyang Sun04f0b2e2019-06-05 11:33:25 +08003251}
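/*
 * Editorial addition, not in the original source: a hedged usage sketch.
 * The argument is the desired total block count of the filesystem;
 * new_total_blocks is a placeholder for a caller-chosen value:
 *
 *	__u64 block_count = new_total_blocks;
 *	ioctl(fd, F2FS_IOC_RESIZE_FS, &block_count);
 */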
3252
Eric Biggers95ae2512019-07-22 09:26:24 -07003253static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3254{
3255 struct inode *inode = file_inode(filp);
3256
3257 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3258
3259 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3260 f2fs_warn(F2FS_I_SB(inode),
Joe Perches833dcd32021-05-26 13:05:36 -07003261 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
Eric Biggers95ae2512019-07-22 09:26:24 -07003262 inode->i_ino);
3263 return -EOPNOTSUPP;
3264 }
3265
3266 return fsverity_ioctl_enable(filp, (const void __user *)arg);
3267}
3268
3269static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3270{
3271 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3272 return -EOPNOTSUPP;
3273
3274 return fsverity_ioctl_measure(filp, (void __user *)arg);
3275}
3276
Eric Biggerse17fe652021-01-15 10:18:16 -08003277static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3278{
3279 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3280 return -EOPNOTSUPP;
3281
3282 return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3283}
3284
Eric Biggers3357af82020-07-14 15:18:12 -07003285static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
Chao Yu4507847c2019-07-17 17:06:11 +08003286{
3287 struct inode *inode = file_inode(filp);
3288 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3289 char *vbuf;
3290 int count;
3291 int err = 0;
3292
3293 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3294 if (!vbuf)
3295 return -ENOMEM;
3296
3297 down_read(&sbi->sb_lock);
3298 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3299 ARRAY_SIZE(sbi->raw_super->volume_name),
3300 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3301 up_read(&sbi->sb_lock);
3302
3303 if (copy_to_user((char __user *)arg, vbuf,
3304 min(FSLABEL_MAX, count)))
3305 err = -EFAULT;
3306
Chao Yuc8eb7022020-09-14 16:47:00 +08003307 kfree(vbuf);
Chao Yu4507847c2019-07-17 17:06:11 +08003308 return err;
3309}
3310
Eric Biggers3357af82020-07-14 15:18:12 -07003311static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
Chao Yu4507847c2019-07-17 17:06:11 +08003312{
3313 struct inode *inode = file_inode(filp);
3314 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3315 char *vbuf;
3316 int err = 0;
3317
3318 if (!capable(CAP_SYS_ADMIN))
3319 return -EPERM;
3320
3321 vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3322 if (IS_ERR(vbuf))
3323 return PTR_ERR(vbuf);
3324
3325 err = mnt_want_write_file(filp);
3326 if (err)
3327 goto out;
3328
3329 down_write(&sbi->sb_lock);
3330
3331 memset(sbi->raw_super->volume_name, 0,
3332 sizeof(sbi->raw_super->volume_name));
3333 utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3334 sbi->raw_super->volume_name,
3335 ARRAY_SIZE(sbi->raw_super->volume_name));
3336
3337 err = f2fs_commit_super(sbi, false);
3338
3339 up_write(&sbi->sb_lock);
3340
3341 mnt_drop_write_file(filp);
3342out:
3343 kfree(vbuf);
3344 return err;
3345}
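/*
 * Editorial addition, not in the original source: a hedged usage sketch.
 * These two handlers back the generic FS_IOC_GETFSLABEL/FS_IOC_SETFSLABEL
 * ABI from <linux/fs.h>; setting the label requires CAP_SYS_ADMIN:
 *
 *	char label[FSLABEL_MAX];
 *	ioctl(fd, FS_IOC_GETFSLABEL, label);
 *	ioctl(fd, FS_IOC_SETFSLABEL, "newlabel");
 */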
3346
Chao Yu439dfb12020-02-21 18:09:21 +08003347static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3348{
3349 struct inode *inode = file_inode(filp);
3350 __u64 blocks;
3351
3352 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3353 return -EOPNOTSUPP;
3354
3355 if (!f2fs_compressed_file(inode))
3356 return -EINVAL;
3357
Daeho Jeongc2759eb2020-09-08 11:44:10 +09003358 blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
Chao Yu439dfb12020-02-21 18:09:21 +08003359 return put_user(blocks, (u64 __user *)arg);
3360}
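/*
 * Editorial addition, not in the original source: a hedged usage sketch.
 * Reads the file's current count of blocks saved by compression
 * (i_compr_blocks):
 *
 *	unsigned long long blocks;
 *	ioctl(fd, F2FS_IOC_GET_COMPRESS_BLOCKS, &blocks);
 */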
3361
Chao Yuef8d5632020-03-06 15:36:09 +08003362static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3363{
3364 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3365 unsigned int released_blocks = 0;
3366 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3367 block_t blkaddr;
3368 int i;
3369
3370 for (i = 0; i < count; i++) {
3371 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3372 dn->ofs_in_node + i);
3373
3374 if (!__is_valid_data_blkaddr(blkaddr))
3375 continue;
3376 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3377 DATA_GENERIC_ENHANCE)))
3378 return -EFSCORRUPTED;
3379 }
3380
3381 while (count) {
3382 int compr_blocks = 0;
3383
3384 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3385 blkaddr = f2fs_data_blkaddr(dn);
3386
3387 if (i == 0) {
3388 if (blkaddr == COMPRESS_ADDR)
3389 continue;
3390 dn->ofs_in_node += cluster_size;
3391 goto next;
3392 }
3393
3394 if (__is_valid_data_blkaddr(blkaddr))
3395 compr_blocks++;
3396
3397 if (blkaddr != NEW_ADDR)
3398 continue;
3399
3400 dn->data_blkaddr = NULL_ADDR;
3401 f2fs_set_data_blkaddr(dn);
3402 }
3403
3404 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3405 dec_valid_block_count(sbi, dn->inode,
3406 cluster_size - compr_blocks);
3407
3408 released_blocks += cluster_size - compr_blocks;
3409next:
3410 count -= cluster_size;
3411 }
3412
3413 return released_blocks;
3414}
3415
3416static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3417{
3418 struct inode *inode = file_inode(filp);
3419 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3420 pgoff_t page_idx = 0, last_idx;
3421 unsigned int released_blocks = 0;
3422 int ret;
3423 int writecount;
3424
3425 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3426 return -EOPNOTSUPP;
3427
3428 if (!f2fs_compressed_file(inode))
3429 return -EINVAL;
3430
3431 if (f2fs_readonly(sbi->sb))
3432 return -EROFS;
3433
3434 ret = mnt_want_write_file(filp);
3435 if (ret)
3436 return ret;
3437
3438 f2fs_balance_fs(F2FS_I_SB(inode), true);
3439
3440 inode_lock(inode);
3441
3442 writecount = atomic_read(&inode->i_writecount);
Daeho Jeong8c8cf262020-10-12 13:59:47 +09003443 if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3444 (!(filp->f_mode & FMODE_WRITE) && writecount)) {
Chao Yuef8d5632020-03-06 15:36:09 +08003445 ret = -EBUSY;
3446 goto out;
3447 }
3448
Jaegeuk Kimc6140412021-05-25 11:39:35 -07003449 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
Chao Yuef8d5632020-03-06 15:36:09 +08003450 ret = -EINVAL;
3451 goto out;
3452 }
3453
3454 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3455 if (ret)
3456 goto out;
3457
Jaegeuk Kimc6140412021-05-25 11:39:35 -07003458 set_inode_flag(inode, FI_COMPRESS_RELEASED);
Chao Yuef8d5632020-03-06 15:36:09 +08003459 inode->i_ctime = current_time(inode);
3460 f2fs_mark_inode_dirty_sync(inode, true);
3461
Daeho Jeongc2759eb2020-09-08 11:44:10 +09003462 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
Daeho Jeong567c4bf2020-07-30 14:09:28 +09003463 goto out;
3464
Chao Yuef8d5632020-03-06 15:36:09 +08003465 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Jan Karaedc6d012021-04-13 18:10:37 +02003466 filemap_invalidate_lock(inode->i_mapping);
Chao Yuef8d5632020-03-06 15:36:09 +08003467
3468 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3469
3470 while (page_idx < last_idx) {
3471 struct dnode_of_data dn;
3472 pgoff_t end_offset, count;
3473
3474 set_new_dnode(&dn, inode, NULL, NULL, 0);
3475 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3476 if (ret) {
3477 if (ret == -ENOENT) {
3478 page_idx = f2fs_get_next_page_offset(&dn,
3479 page_idx);
3480 ret = 0;
3481 continue;
3482 }
3483 break;
3484 }
3485
3486 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3487 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
Chao Yu4fec3fc02020-04-08 19:55:17 +08003488 count = round_up(count, F2FS_I(inode)->i_cluster_size);
Chao Yuef8d5632020-03-06 15:36:09 +08003489
3490 ret = release_compress_blocks(&dn, count);
3491
3492 f2fs_put_dnode(&dn);
3493
3494 if (ret < 0)
3495 break;
3496
3497 page_idx += count;
3498 released_blocks += ret;
3499 }
3500
Jan Karaedc6d012021-04-13 18:10:37 +02003501 filemap_invalidate_unlock(inode->i_mapping);
Chao Yud75da8c2021-08-24 08:11:38 +08003502 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Chao Yuef8d5632020-03-06 15:36:09 +08003503out:
3504 inode_unlock(inode);
3505
3506 mnt_drop_write_file(filp);
3507
3508 if (ret >= 0) {
3509 ret = put_user(released_blocks, (u64 __user *)arg);
Daeho Jeongc2759eb2020-09-08 11:44:10 +09003510 } else if (released_blocks &&
3511 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
Chao Yuef8d5632020-03-06 15:36:09 +08003512 set_sbi_flag(sbi, SBI_NEED_FSCK);
3513 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
Daeho Jeongc2759eb2020-09-08 11:44:10 +09003514 "iblocks=%llu, released=%u, compr_blocks=%u, "
Chao Yuef8d5632020-03-06 15:36:09 +08003515 "run fsck to fix.",
3516 __func__, inode->i_ino, inode->i_blocks,
3517 released_blocks,
Daeho Jeongc2759eb2020-09-08 11:44:10 +09003518 atomic_read(&F2FS_I(inode)->i_compr_blocks));
Chao Yuef8d5632020-03-06 15:36:09 +08003519 }
3520
3521 return ret;
3522}
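/*
 * Editorial addition, not in the original source: a hedged usage sketch.
 * Releasing returns the blocks saved by compression to free space and
 * sets FI_COMPRESS_RELEASED, after which writes are rejected until the
 * blocks are re-reserved:
 *
 *	unsigned long long released;
 *	ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &released);
 */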
3523
Chao Yuc75488f2020-03-06 14:35:33 +08003524static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3525{
3526 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3527 unsigned int reserved_blocks = 0;
3528 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3529 block_t blkaddr;
3530 int i;
3531
3532 for (i = 0; i < count; i++) {
3533 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3534 dn->ofs_in_node + i);
3535
3536 if (!__is_valid_data_blkaddr(blkaddr))
3537 continue;
3538 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3539 DATA_GENERIC_ENHANCE)))
3540 return -EFSCORRUPTED;
3541 }
3542
3543 while (count) {
3544 int compr_blocks = 0;
3545 blkcnt_t reserved;
3546 int ret;
3547
3548 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3549 blkaddr = f2fs_data_blkaddr(dn);
3550
3551 if (i == 0) {
3552 if (blkaddr == COMPRESS_ADDR)
3553 continue;
3554 dn->ofs_in_node += cluster_size;
3555 goto next;
3556 }
3557
3558 if (__is_valid_data_blkaddr(blkaddr)) {
3559 compr_blocks++;
3560 continue;
3561 }
3562
3563 dn->data_blkaddr = NEW_ADDR;
3564 f2fs_set_data_blkaddr(dn);
3565 }
3566
3567 reserved = cluster_size - compr_blocks;
3568 ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3569 if (ret)
3570 return ret;
3571
3572 if (reserved != cluster_size - compr_blocks)
3573 return -ENOSPC;
3574
3575 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3576
3577 reserved_blocks += reserved;
3578next:
3579 count -= cluster_size;
3580 }
3581
3582 return reserved_blocks;
3583}
3584
3585static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3586{
3587 struct inode *inode = file_inode(filp);
3588 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3589 pgoff_t page_idx = 0, last_idx;
3590 unsigned int reserved_blocks = 0;
3591 int ret;
3592
3593 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3594 return -EOPNOTSUPP;
3595
3596 if (!f2fs_compressed_file(inode))
3597 return -EINVAL;
3598
3599 if (f2fs_readonly(sbi->sb))
3600 return -EROFS;
3601
3602 ret = mnt_want_write_file(filp);
3603 if (ret)
3604 return ret;
3605
Daeho Jeongc2759eb2020-09-08 11:44:10 +09003606 if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
Chao Yuc75488f2020-03-06 14:35:33 +08003607 goto out;
3608
3609 f2fs_balance_fs(F2FS_I_SB(inode), true);
3610
3611 inode_lock(inode);
3612
Jaegeuk Kimc6140412021-05-25 11:39:35 -07003613 if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
Chao Yuc75488f2020-03-06 14:35:33 +08003614 ret = -EINVAL;
3615 goto unlock_inode;
3616 }
3617
3618 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Jan Karaedc6d012021-04-13 18:10:37 +02003619 filemap_invalidate_lock(inode->i_mapping);
Chao Yuc75488f2020-03-06 14:35:33 +08003620
3621 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3622
3623 while (page_idx < last_idx) {
3624 struct dnode_of_data dn;
3625 pgoff_t end_offset, count;
3626
3627 set_new_dnode(&dn, inode, NULL, NULL, 0);
3628 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3629 if (ret) {
3630 if (ret == -ENOENT) {
3631 page_idx = f2fs_get_next_page_offset(&dn,
3632 page_idx);
3633 ret = 0;
3634 continue;
3635 }
3636 break;
3637 }
3638
3639 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3640 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
Chao Yu4fec3fc02020-04-08 19:55:17 +08003641 count = round_up(count, F2FS_I(inode)->i_cluster_size);
Chao Yuc75488f2020-03-06 14:35:33 +08003642
3643 ret = reserve_compress_blocks(&dn, count);
3644
3645 f2fs_put_dnode(&dn);
3646
3647 if (ret < 0)
3648 break;
3649
3650 page_idx += count;
3651 reserved_blocks += ret;
3652 }
3653
Jan Karaedc6d012021-04-13 18:10:37 +02003654 filemap_invalidate_unlock(inode->i_mapping);
Chao Yud75da8c2021-08-24 08:11:38 +08003655 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Chao Yuc75488f2020-03-06 14:35:33 +08003656
3657 if (ret >= 0) {
Jaegeuk Kimc6140412021-05-25 11:39:35 -07003658 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
Chao Yuc75488f2020-03-06 14:35:33 +08003659 inode->i_ctime = current_time(inode);
3660 f2fs_mark_inode_dirty_sync(inode, true);
3661 }
3662unlock_inode:
3663 inode_unlock(inode);
3664out:
3665 mnt_drop_write_file(filp);
3666
3667 if (ret >= 0) {
3668 ret = put_user(reserved_blocks, (u64 __user *)arg);
Daeho Jeongc2759eb2020-09-08 11:44:10 +09003669 } else if (reserved_blocks &&
3670 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
Chao Yuc75488f2020-03-06 14:35:33 +08003671 set_sbi_flag(sbi, SBI_NEED_FSCK);
3672 f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
Daeho Jeongc2759eb2020-09-08 11:44:10 +09003673 "iblocks=%llu, reserved=%u, compr_blocks=%u, "
Chao Yuc75488f2020-03-06 14:35:33 +08003674 "run fsck to fix.",
3675 __func__, inode->i_ino, inode->i_blocks,
3676 reserved_blocks,
Daeho Jeongc2759eb2020-09-08 11:44:10 +09003677 atomic_read(&F2FS_I(inode)->i_compr_blocks));
Chao Yuc75488f2020-03-06 14:35:33 +08003678 }
3679
3680 return ret;
3681}
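/*
 * Editorial addition, not in the original source: a hedged usage sketch.
 * The inverse of the release ioctl: it re-reserves the saved blocks and
 * clears FI_COMPRESS_RELEASED so the file becomes writable again:
 *
 *	unsigned long long reserved;
 *	ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &reserved);
 */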
3682
Daeho Jeong9af84642020-07-21 12:21:11 +09003683static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3684 pgoff_t off, block_t block, block_t len, u32 flags)
3685{
3686 struct request_queue *q = bdev_get_queue(bdev);
3687 sector_t sector = SECTOR_FROM_BLOCK(block);
3688 sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3689 int ret = 0;
3690
3691 if (!q)
3692 return -ENXIO;
3693
3694 if (flags & F2FS_TRIM_FILE_DISCARD)
3695 ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3696 blk_queue_secure_erase(q) ?
3697 BLKDEV_DISCARD_SECURE : 0);
3698
3699 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3700 if (IS_ENCRYPTED(inode))
3701 ret = fscrypt_zeroout_range(inode, off, block, len);
3702 else
3703 ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3704 GFP_NOFS, 0);
3705 }
3706
3707 return ret;
3708}
3709
3710static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3711{
3712 struct inode *inode = file_inode(filp);
3713 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3714 struct address_space *mapping = inode->i_mapping;
3715 struct block_device *prev_bdev = NULL;
3716 struct f2fs_sectrim_range range;
3717 pgoff_t index, pg_end, prev_index = 0;
3718 block_t prev_block = 0, len = 0;
3719 loff_t end_addr;
3720 bool to_end = false;
3721 int ret = 0;
3722
3723 if (!(filp->f_mode & FMODE_WRITE))
3724 return -EBADF;
3725
3726 if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3727 sizeof(range)))
3728 return -EFAULT;
3729
3730 if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3731 !S_ISREG(inode->i_mode))
3732 return -EINVAL;
3733
3734 if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3735 !f2fs_hw_support_discard(sbi)) ||
3736 ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3737 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3738 return -EOPNOTSUPP;
3739
3740 file_start_write(filp);
3741 inode_lock(inode);
3742
3743 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3744 range.start >= inode->i_size) {
3745 ret = -EINVAL;
3746 goto err;
3747 }
3748
3749 if (range.len == 0)
3750 goto err;
3751
3752 if (inode->i_size - range.start > range.len) {
3753 end_addr = range.start + range.len;
3754 } else {
3755 end_addr = range.len == (u64)-1 ?
3756 sbi->sb->s_maxbytes : inode->i_size;
3757 to_end = true;
3758 }
3759
3760 if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3761 (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3762 ret = -EINVAL;
3763 goto err;
3764 }
3765
3766 index = F2FS_BYTES_TO_BLK(range.start);
3767 pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3768
3769 ret = f2fs_convert_inline_inode(inode);
3770 if (ret)
3771 goto err;
3772
3773 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Jan Karaedc6d012021-04-13 18:10:37 +02003774 filemap_invalidate_lock(mapping);
Daeho Jeong9af84642020-07-21 12:21:11 +09003775
3776 ret = filemap_write_and_wait_range(mapping, range.start,
3777 to_end ? LLONG_MAX : end_addr - 1);
3778 if (ret)
3779 goto out;
3780
3781 truncate_inode_pages_range(mapping, range.start,
3782 to_end ? -1 : end_addr - 1);
3783
3784 while (index < pg_end) {
3785 struct dnode_of_data dn;
3786 pgoff_t end_offset, count;
3787 int i;
3788
3789 set_new_dnode(&dn, inode, NULL, NULL, 0);
3790 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3791 if (ret) {
3792 if (ret == -ENOENT) {
3793 index = f2fs_get_next_page_offset(&dn, index);
3794 continue;
3795 }
3796 goto out;
3797 }
3798
3799 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3800 count = min(end_offset - dn.ofs_in_node, pg_end - index);
3801 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3802 struct block_device *cur_bdev;
3803 block_t blkaddr = f2fs_data_blkaddr(&dn);
3804
3805 if (!__is_valid_data_blkaddr(blkaddr))
3806 continue;
3807
3808 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3809 DATA_GENERIC_ENHANCE)) {
3810 ret = -EFSCORRUPTED;
3811 f2fs_put_dnode(&dn);
3812 goto out;
3813 }
3814
3815 cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3816 if (f2fs_is_multi_device(sbi)) {
3817 int di = f2fs_target_device_index(sbi, blkaddr);
3818
3819 blkaddr -= FDEV(di).start_blk;
3820 }
3821
3822 if (len) {
3823 if (prev_bdev == cur_bdev &&
3824 index == prev_index + len &&
3825 blkaddr == prev_block + len) {
3826 len++;
3827 } else {
3828 ret = f2fs_secure_erase(prev_bdev,
3829 inode, prev_index, prev_block,
3830 len, range.flags);
3831 if (ret) {
3832 f2fs_put_dnode(&dn);
3833 goto out;
3834 }
3835
3836 len = 0;
3837 }
3838 }
3839
3840 if (!len) {
3841 prev_bdev = cur_bdev;
3842 prev_index = index;
3843 prev_block = blkaddr;
3844 len = 1;
3845 }
3846 }
3847
3848 f2fs_put_dnode(&dn);
3849
3850 if (fatal_signal_pending(current)) {
3851 ret = -EINTR;
3852 goto out;
3853 }
3854 cond_resched();
3855 }
3856
3857 if (len)
3858 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3859 prev_block, len, range.flags);
3860out:
Jan Karaedc6d012021-04-13 18:10:37 +02003861 filemap_invalidate_unlock(mapping);
Daeho Jeong9af84642020-07-21 12:21:11 +09003862 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3863err:
3864 inode_unlock(inode);
3865 file_end_write(filp);
3866
3867 return ret;
3868}
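/*
 * Editorial addition, not in the original source: a hedged sketch of a
 * whole-file secure erase combining discard and zero-out, using the flag
 * bits checked above; len == (__u64)-1 means "to end of file":
 *
 *	struct f2fs_sectrim_range sr = {
 *		.start = 0, .len = (__u64)-1,
 *		.flags = F2FS_TRIM_FILE_DISCARD | F2FS_TRIM_FILE_ZEROOUT,
 *	};
 *	ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &sr);
 */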
3869
Daeho Jeong9e2a5f82020-10-30 13:10:34 +09003870static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
Jaegeuk Kim52656e62014-09-24 15:37:02 -07003871{
Daeho Jeong9e2a5f82020-10-30 13:10:34 +09003872 struct inode *inode = file_inode(filp);
3873 struct f2fs_comp_option option;
Jaegeuk Kim1f227a32017-10-23 23:48:49 +02003874
Daeho Jeong9e2a5f82020-10-30 13:10:34 +09003875 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3876 return -EOPNOTSUPP;
3877
3878 inode_lock_shared(inode);
3879
3880 if (!f2fs_compressed_file(inode)) {
3881 inode_unlock_shared(inode);
3882 return -ENODATA;
3883 }
3884
3885 option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3886 option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3887
3888 inode_unlock_shared(inode);
3889
3890 if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
3891 sizeof(option)))
3892 return -EFAULT;
3893
3894 return 0;
3895}
3896
Daeho Jeonge1e8deb2020-10-30 13:10:35 +09003897static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
3898{
3899 struct inode *inode = file_inode(filp);
3900 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3901 struct f2fs_comp_option option;
3902 int ret = 0;
3903
3904 if (!f2fs_sb_has_compression(sbi))
3905 return -EOPNOTSUPP;
3906
3907 if (!(filp->f_mode & FMODE_WRITE))
3908 return -EBADF;
3909
3910 if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
3911 sizeof(option)))
3912 return -EFAULT;
3913
3914 if (!f2fs_compressed_file(inode) ||
3915 option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
3916 option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
3917 option.algorithm >= COMPRESS_MAX)
3918 return -EINVAL;
3919
3920 file_start_write(filp);
3921 inode_lock(inode);
3922
3923 if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
3924 ret = -EBUSY;
3925 goto out;
3926 }
3927
3928 if (inode->i_size != 0) {
3929 ret = -EFBIG;
3930 goto out;
3931 }
3932
3933 F2FS_I(inode)->i_compress_algorithm = option.algorithm;
3934 F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
3935 F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
3936 f2fs_mark_inode_dirty_sync(inode, true);
3937
3938 if (!f2fs_is_compress_backend_ready(inode))
3939 f2fs_warn(sbi, "compression algorithm is successfully set, "
3940 "but current kernel doesn't support this algorithm.");
3941out:
3942 inode_unlock(inode);
3943 file_end_write(filp);
3944
3945 return ret;
3946}
3947
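/*
 * Read @len pages starting at @page_idx into the page cache and mark them
 * dirty again, so that a subsequent writeback pass rewrites them through the
 * (de)compression path.
 */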
static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
{
	DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	pgoff_t redirty_idx = page_idx;
	int i, page_len = 0, ret = 0;

	page_cache_ra_unbounded(&ractl, len, 0);

	for (i = 0; i < len; i++, page_idx++) {
		page = read_cache_page(mapping, page_idx, NULL, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			break;
		}
		page_len++;
	}

	for (i = 0; i < page_len; i++, redirty_idx++) {
		page = find_lock_page(mapping, redirty_idx);
		if (!page) {
			ret = -ENOMEM;
			break;
		}
		set_page_dirty(page);
		f2fs_put_page(page, 1);
		f2fs_put_page(page, 0);
	}

	return ret;
}

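/*
 * F2FS_IOC_DECOMPRESS_FILE: rewrite every cluster of a compressed file as
 * plain data.  Only supported with compress_mode=user, where writeback does
 * not re-compress clusters unless FI_ENABLE_COMPRESS is set.
 */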
static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	int cluster_size = F2FS_I(inode)->i_cluster_size;
	int count, ret;

	if (!f2fs_sb_has_compression(sbi) ||
			F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
		return -EOPNOTSUPP;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	file_start_write(filp);
	inode_lock(inode);

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (f2fs_is_mmap_file(inode)) {
		ret = -EBUSY;
		goto out;
	}

	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;

	if (!atomic_read(&fi->i_compr_blocks))
		goto out;

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	count = last_idx - page_idx;
	while (count) {
		int len = min(cluster_size, count);

		ret = redirty_blocks(inode, page_idx, len);
		if (ret < 0)
			break;

		if (get_dirty_pages(inode) >= blk_per_seg)
			filemap_fdatawrite(inode->i_mapping);

		count -= len;
		page_idx += len;
	}

	if (!ret)
		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
							LLONG_MAX);

	if (ret)
		f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
			  __func__, ret);
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}

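/*
 * F2FS_IOC_COMPRESS_FILE: the counterpart of F2FS_IOC_DECOMPRESS_FILE.
 * FI_ENABLE_COMPRESS is held across the redirty/writeback loop so that
 * writeback compresses each cluster, and is cleared again afterwards.
 */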
static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	int cluster_size = F2FS_I(inode)->i_cluster_size;
	int count, ret;

	if (!f2fs_sb_has_compression(sbi) ||
			F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
		return -EOPNOTSUPP;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	file_start_write(filp);
	inode_lock(inode);

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (f2fs_is_mmap_file(inode)) {
		ret = -EBUSY;
		goto out;
	}

	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_ENABLE_COMPRESS);

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	count = last_idx - page_idx;
	while (count) {
		int len = min(cluster_size, count);

		ret = redirty_blocks(inode, page_idx, len);
		if (ret < 0)
			break;

		if (get_dirty_pages(inode) >= blk_per_seg)
			filemap_fdatawrite(inode->i_mapping);

		count -= len;
		page_idx += len;
	}

	if (!ret)
		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
							LLONG_MAX);

	clear_inode_flag(inode, FI_ENABLE_COMPRESS);

	if (ret)
		f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
			  __func__, ret);
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}

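/*
 * Common ioctl dispatcher, shared by the native entry point and (via
 * compat_ptr() translation) the 32-bit compat entry point below.
 */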
static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
	case FS_IOC_ADD_ENCRYPTION_KEY:
		return f2fs_ioc_add_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
		return f2fs_ioc_remove_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
		return f2fs_ioc_get_encryption_key_status(filp, arg);
	case FS_IOC_GET_ENCRYPTION_NONCE:
		return f2fs_ioc_get_encryption_nonce(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	case F2FS_IOC_RESIZE_FS:
		return f2fs_ioc_resize_fs(filp, arg);
	case FS_IOC_ENABLE_VERITY:
		return f2fs_ioc_enable_verity(filp, arg);
	case FS_IOC_MEASURE_VERITY:
		return f2fs_ioc_measure_verity(filp, arg);
	case FS_IOC_READ_VERITY_METADATA:
		return f2fs_ioc_read_verity_metadata(filp, arg);
	case FS_IOC_GETFSLABEL:
		return f2fs_ioc_getfslabel(filp, arg);
	case FS_IOC_SETFSLABEL:
		return f2fs_ioc_setfslabel(filp, arg);
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
		return f2fs_get_compress_blocks(filp, arg);
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
		return f2fs_release_compress_blocks(filp, arg);
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
		return f2fs_reserve_compress_blocks(filp, arg);
	case F2FS_IOC_SEC_TRIM_FILE:
		return f2fs_sec_trim_file(filp, arg);
	case F2FS_IOC_GET_COMPRESS_OPTION:
		return f2fs_ioc_get_compress_option(filp, arg);
	case F2FS_IOC_SET_COMPRESS_OPTION:
		return f2fs_ioc_set_compress_option(filp, arg);
	case F2FS_IOC_DECOMPRESS_FILE:
		return f2fs_ioc_decompress_file(filp, arg);
	case F2FS_IOC_COMPRESS_FILE:
		return f2fs_ioc_compress_file(filp, arg);
	default:
		return -ENOTTY;
	}
}

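/*
 * Native ioctl entry point: fail fast if a checkpoint error has put the
 * filesystem into an unrecoverable state, or if the checkpoint is not ready
 * to accept new work.
 */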
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
		return -ENOSPC;

	return __f2fs_ioctl(filp, cmd, arg);
}

/*
 * Return %true if the given read or write request should use direct I/O, or
 * %false if it should use buffered I/O.
 */
static bool f2fs_should_use_dio(struct inode *inode, struct kiocb *iocb,
				struct iov_iter *iter)
{
	unsigned int align;

	if (!(iocb->ki_flags & IOCB_DIRECT))
		return false;

	if (f2fs_force_buffered_io(inode, iocb, iter))
		return false;

	/*
	 * Direct I/O not aligned to the disk's logical_block_size will be
	 * attempted, but will fail with -EINVAL.
	 *
	 * f2fs additionally requires that direct I/O be aligned to the
	 * filesystem block size, which is often a stricter requirement.
	 * However, f2fs traditionally falls back to buffered I/O on requests
	 * that are logical_block_size-aligned but not fs-block aligned.
	 *
	 * The below logic implements this behavior.
	 */
	align = iocb->ki_pos | iov_iter_alignment(iter);
	if (!IS_ALIGNED(align, i_blocksize(inode)) &&
	    IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev)))
		return false;

	return true;
}

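/*
 * Completion callback for direct reads issued through iomap: drop the
 * in-flight F2FS_DIO_READ counter and account the bytes read in the iostat
 * statistics.
 */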
static int f2fs_dio_read_end_io(struct kiocb *iocb, ssize_t size, int error,
				unsigned int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));

	dec_page_count(sbi, F2FS_DIO_READ);
	if (error)
		return error;
	f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, size);
	return 0;
}

static const struct iomap_dio_ops f2fs_iomap_dio_read_ops = {
	.end_io = f2fs_dio_read_end_io,
};

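/*
 * Direct read path.  GC is locked out (i_gc_rwsem[READ]) for the duration of
 * the I/O so that blocks cannot be moved underneath it.
 */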
static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	const loff_t pos = iocb->ki_pos;
	const size_t count = iov_iter_count(to);
	struct iomap_dio *dio;
	ssize_t ret;

	if (count == 0)
		return 0; /* skip atime update */

	trace_f2fs_direct_IO_enter(inode, iocb, count, READ);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!down_read_trylock(&fi->i_gc_rwsem[READ])) {
			ret = -EAGAIN;
			goto out;
		}
	} else {
		down_read(&fi->i_gc_rwsem[READ]);
	}

	/*
	 * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
	 * the higher-level function iomap_dio_rw() in order to ensure that the
	 * F2FS_DIO_READ counter will be decremented correctly in all cases.
	 */
	inc_page_count(sbi, F2FS_DIO_READ);
	dio = __iomap_dio_rw(iocb, to, &f2fs_iomap_ops,
			     &f2fs_iomap_dio_read_ops, 0, 0);
	if (IS_ERR_OR_NULL(dio)) {
		ret = PTR_ERR_OR_ZERO(dio);
		if (ret != -EIOCBQUEUED)
			dec_page_count(sbi, F2FS_DIO_READ);
	} else {
		ret = iomap_dio_complete(dio);
	}

	up_read(&fi->i_gc_rwsem[READ]);

	file_accessed(file);
out:
	trace_f2fs_direct_IO_exit(inode, pos, count, READ, ret);
	return ret;
}

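/*
 * ->read_iter(): dispatch to the iomap-based direct I/O path or to a plain
 * page cache read, with per-type iostat accounting.
 */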
static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	if (f2fs_should_use_dio(inode, iocb, to))
		return f2fs_dio_read_iter(iocb, to);

	ret = filemap_read(iocb, to, 0);
	if (ret > 0)
		f2fs_update_iostat(F2FS_I_SB(inode), APP_BUFFERED_READ_IO, ret);
	return ret;
}

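/*
 * Common checks at the start of a write: immutable and released-compressed
 * inodes are rejected, then the generic checks run and the file is marked
 * modified.  Returns the number of bytes that may be written, or an errno.
 */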
static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t count;
	int err;

	if (IS_IMMUTABLE(inode))
		return -EPERM;

	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		return -EPERM;

	count = generic_write_checks(iocb, from);
	if (count <= 0)
		return count;

	err = file_modified(file);
	if (err)
		return err;
	return count;
}

/*
 * Preallocate blocks for a write request, if it is possible and helpful to do
 * so.  Returns a positive number if blocks may have been preallocated, 0 if no
 * blocks were preallocated, or a negative errno value if something went
 * seriously wrong.  Also sets FI_PREALLOCATED_ALL on the inode if *all* the
 * requested blocks (not just some of them) have been allocated.
 */
static int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *iter,
				   bool dio)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	const loff_t pos = iocb->ki_pos;
	const size_t count = iov_iter_count(iter);
	struct f2fs_map_blocks map = {};
	int flag;
	int ret;

	/* If it will be an out-of-place direct write, don't bother. */
	if (dio && f2fs_lfs_mode(sbi))
		return 0;
	/*
	 * Don't preallocate holes aligned to DIO_SKIP_HOLES; a direct write
	 * that hits such a hole is turned into buffered I/O instead.
	 */
	if (dio && i_size_read(inode) &&
	    (F2FS_BYTES_TO_BLK(pos) < F2FS_BLK_ALIGN(i_size_read(inode))))
		return 0;

	/* No-wait I/O can't allocate blocks. */
	if (iocb->ki_flags & IOCB_NOWAIT)
		return 0;

	/* If it will be a short write, don't bother. */
	if (fault_in_iov_iter_readable(iter, count))
		return 0;

	if (f2fs_has_inline_data(inode)) {
		/* If the data will fit inline, don't bother. */
		if (pos + count <= MAX_INLINE_DATA(inode))
			return 0;
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	/* Do not preallocate blocks that will be written partially in 4KB. */
	map.m_lblk = F2FS_BLK_ALIGN(pos);
	map.m_len = F2FS_BYTES_TO_BLK(pos + count);
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;
	map.m_may_create = true;
	if (dio) {
		map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
		flag = F2FS_GET_BLOCK_PRE_DIO;
	} else {
		map.m_seg_type = NO_CHECK_TYPE;
		flag = F2FS_GET_BLOCK_PRE_AIO;
	}

	ret = f2fs_map_blocks(inode, &map, 1, flag);
	/* -ENOSPC|-EDQUOT are fine to report the number of allocated blocks. */
	if (ret < 0 && !((ret == -ENOSPC || ret == -EDQUOT) && map.m_len > 0))
		return ret;
	if (ret == 0)
		set_inode_flag(inode, FI_PREALLOCATED_ALL);
	return map.m_len;
}

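/*
 * Buffered write through the page cache.  RWF_NOWAIT is not supported here;
 * on success the file position is advanced and the bytes are accounted.
 */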
static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

	current->backing_dev_info = inode_to_bdi(inode);
	ret = generic_perform_write(file, from, iocb->ki_pos);
	current->backing_dev_info = NULL;

	if (ret > 0) {
		iocb->ki_pos += ret;
		f2fs_update_iostat(F2FS_I_SB(inode), APP_BUFFERED_IO, ret);
	}
	return ret;
}

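/*
 * Completion callback for direct writes issued through iomap, mirroring
 * f2fs_dio_read_end_io() on the write side.
 */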
static int f2fs_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error,
				 unsigned int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));

	dec_page_count(sbi, F2FS_DIO_WRITE);
	if (error)
		return error;
	f2fs_update_iostat(sbi, APP_DIRECT_IO, size);
	return 0;
}

static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = {
	.end_io = f2fs_dio_write_end_io,
};

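/*
 * Direct write path.  In LFS mode all DIO writes are out-of-place ("OPU"),
 * which additionally requires i_gc_rwsem[READ].  A partial direct write falls
 * back to a buffered write for the remainder, followed by a flush and page
 * cache invalidation to preserve O_DIRECT semantics.
 */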
static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
				   bool *may_need_sync)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	const bool do_opu = f2fs_lfs_mode(sbi);
	const int whint_mode = F2FS_OPTION(sbi).whint_mode;
	const loff_t pos = iocb->ki_pos;
	const ssize_t count = iov_iter_count(from);
	const enum rw_hint hint = iocb->ki_hint;
	unsigned int dio_flags;
	struct iomap_dio *dio;
	ssize_t ret;

	trace_f2fs_direct_IO_enter(inode, iocb, count, WRITE);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		/* f2fs_convert_inline_inode() and block allocation can block */
		if (f2fs_has_inline_data(inode) ||
		    !f2fs_overwrite_io(inode, pos, count)) {
			ret = -EAGAIN;
			goto out;
		}

		if (!down_read_trylock(&fi->i_gc_rwsem[WRITE])) {
			ret = -EAGAIN;
			goto out;
		}
		if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
			up_read(&fi->i_gc_rwsem[WRITE]);
			ret = -EAGAIN;
			goto out;
		}
	} else {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			goto out;

		down_read(&fi->i_gc_rwsem[WRITE]);
		if (do_opu)
			down_read(&fi->i_gc_rwsem[READ]);
	}
	if (whint_mode == WHINT_MODE_OFF)
		iocb->ki_hint = WRITE_LIFE_NOT_SET;

	/*
	 * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
	 * the higher-level function iomap_dio_rw() in order to ensure that the
	 * F2FS_DIO_WRITE counter will be decremented correctly in all cases.
	 */
	inc_page_count(sbi, F2FS_DIO_WRITE);
	dio_flags = 0;
	if (pos + count > inode->i_size)
		dio_flags |= IOMAP_DIO_FORCE_WAIT;
	dio = __iomap_dio_rw(iocb, from, &f2fs_iomap_ops,
			     &f2fs_iomap_dio_write_ops, dio_flags, 0);
	if (IS_ERR_OR_NULL(dio)) {
		ret = PTR_ERR_OR_ZERO(dio);
		if (ret == -ENOTBLK)
			ret = 0;
		if (ret != -EIOCBQUEUED)
			dec_page_count(sbi, F2FS_DIO_WRITE);
	} else {
		ret = iomap_dio_complete(dio);
	}

	if (whint_mode == WHINT_MODE_OFF)
		iocb->ki_hint = hint;
	if (do_opu)
		up_read(&fi->i_gc_rwsem[READ]);
	up_read(&fi->i_gc_rwsem[WRITE]);

	if (ret < 0)
		goto out;
	if (pos + ret > inode->i_size)
		f2fs_i_size_write(inode, pos + ret);
	if (!do_opu)
		set_inode_flag(inode, FI_UPDATE_WRITE);

	if (iov_iter_count(from)) {
		ssize_t ret2;
		loff_t bufio_start_pos = iocb->ki_pos;

		/*
		 * The direct write was partial, so we need to fall back to a
		 * buffered write for the remainder.
		 */

		ret2 = f2fs_buffered_write_iter(iocb, from);
		if (iov_iter_count(from))
			f2fs_write_failed(inode, iocb->ki_pos);
		if (ret2 < 0)
			goto out;

		/*
		 * Ensure that the pagecache pages are written to disk and
		 * invalidated to preserve the expected O_DIRECT semantics.
		 */
		if (ret2 > 0) {
			loff_t bufio_end_pos = bufio_start_pos + ret2 - 1;

			ret += ret2;

			ret2 = filemap_write_and_wait_range(file->f_mapping,
							    bufio_start_pos,
							    bufio_end_pos);
			if (ret2 < 0)
				goto out;
			invalidate_mapping_pages(file->f_mapping,
						 bufio_start_pos >> PAGE_SHIFT,
						 bufio_end_pos >> PAGE_SHIFT);
		}
	} else {
		/* iomap_dio_rw() already handled the generic_write_sync(). */
		*may_need_sync = false;
	}
out:
	trace_f2fs_direct_IO_exit(inode, pos, count, WRITE, ret);
	return ret;
}

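/*
 * ->write_iter(): run the write checks, decide between direct and buffered
 * I/O, preallocate blocks, do the write, and trim any preallocation that was
 * left beyond the final i_size.
 */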
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	const loff_t orig_pos = iocb->ki_pos;
	const size_t orig_count = iov_iter_count(from);
	loff_t target_size;
	bool dio;
	bool may_need_sync = true;
	int preallocated;
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		ret = -EIO;
		goto out;
	}

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode)) {
			ret = -EAGAIN;
			goto out;
		}
	} else {
		inode_lock(inode);
	}

	ret = f2fs_write_checks(iocb, from);
	if (ret <= 0)
		goto out_unlock;

	/* Determine whether we will do a direct write or a buffered write. */
	dio = f2fs_should_use_dio(inode, iocb, from);

	/* Possibly preallocate the blocks for the write. */
	target_size = iocb->ki_pos + iov_iter_count(from);
	preallocated = f2fs_preallocate_blocks(iocb, from, dio);
	if (preallocated < 0)
		ret = preallocated;
	else
		/* Do the actual write. */
		ret = dio ?
			f2fs_dio_write_iter(iocb, from, &may_need_sync) :
			f2fs_buffered_write_iter(iocb, from);

	/* Don't leave any preallocated blocks around past i_size. */
	if (preallocated && i_size_read(inode) < target_size) {
		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		filemap_invalidate_lock(inode->i_mapping);
		if (!f2fs_truncate(inode))
			file_dont_truncate(inode);
		filemap_invalidate_unlock(inode->i_mapping);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	} else {
		file_dont_truncate(inode);
	}

	clear_inode_flag(inode, FI_PREALLOCATED_ALL);
out_unlock:
	inode_unlock(inode);
out:
	trace_f2fs_file_write_iter(inode, orig_pos, orig_count, ret);
	if (ret > 0 && may_need_sync)
		ret = generic_write_sync(iocb, ret);
	return ret;
}

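/*
 * POSIX_FADV_SEQUENTIAL scales this file's readahead window by the
 * seq_file_ra_mul multiplier; POSIX_FADV_DONTNEED additionally drops cached
 * compressed pages when the compress_cache option is enabled.  All other
 * advice is passed through to generic_fadvise().
 */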
static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
		int advice)
{
	struct address_space *mapping;
	struct backing_dev_info *bdi;
	struct inode *inode = file_inode(filp);
	int err;

	if (advice == POSIX_FADV_SEQUENTIAL) {
		if (S_ISFIFO(inode->i_mode))
			return -ESPIPE;

		mapping = filp->f_mapping;
		if (!mapping || len < 0)
			return -EINVAL;

		bdi = inode_to_bdi(mapping->host);
		filp->f_ra.ra_pages = bdi->ra_pages *
			F2FS_I_SB(inode)->seq_file_ra_mul;
		spin_lock(&filp->f_lock);
		filp->f_mode &= ~FMODE_RANDOM;
		spin_unlock(&filp->f_lock);
		return 0;
	}

	err = generic_fadvise(filp, offset, len, advice);
	if (!err && advice == POSIX_FADV_DONTNEED &&
		test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) &&
		f2fs_compressed_file(inode))
		f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino);

	return err;
}

#ifdef CONFIG_COMPAT
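/*
 * 32-bit compat variants of the range-based ioctls: the native structs use
 * u64 fields whose alignment differs between 32-bit and 64-bit ABIs, so they
 * are re-declared here with compat_u64 and translated field by field.
 */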
struct compat_f2fs_gc_range {
	u32 sync;
	compat_u64 start;
	compat_u64 len;
};
#define F2FS_IOC32_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,\
						struct compat_f2fs_gc_range)

static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
{
	struct compat_f2fs_gc_range __user *urange;
	struct f2fs_gc_range range;
	int err;

	urange = compat_ptr(arg);
	err = get_user(range.sync, &urange->sync);
	err |= get_user(range.start, &urange->start);
	err |= get_user(range.len, &urange->len);
	if (err)
		return -EFAULT;

	return __f2fs_ioc_gc_range(file, &range);
}

struct compat_f2fs_move_range {
	u32 dst_fd;
	compat_u64 pos_in;
	compat_u64 pos_out;
	compat_u64 len;
};
#define F2FS_IOC32_MOVE_RANGE		_IOWR(F2FS_IOCTL_MAGIC, 9,	\
					struct compat_f2fs_move_range)

static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
{
	struct compat_f2fs_move_range __user *urange;
	struct f2fs_move_range range;
	int err;

	urange = compat_ptr(arg);
	err = get_user(range.dst_fd, &urange->dst_fd);
	err |= get_user(range.pos_in, &urange->pos_in);
	err |= get_user(range.pos_out, &urange->pos_out);
	err |= get_user(range.len, &urange->len);
	if (err)
		return -EFAULT;

	return __f2fs_ioc_move_range(file, &range);
}

long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
		return -ENOSPC;

	switch (cmd) {
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
		return f2fs_compat_ioc_gc_range(file, arg);
	case F2FS_IOC32_MOVE_RANGE:
		return f2fs_compat_ioc_move_range(file, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case FITRIM:
	case FS_IOC_SET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_PWSALT:
	case FS_IOC_GET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
	case FS_IOC_ADD_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
	case FS_IOC_GET_ENCRYPTION_NONCE:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
	case F2FS_IOC_RESIZE_FS:
	case FS_IOC_ENABLE_VERITY:
	case FS_IOC_MEASURE_VERITY:
	case FS_IOC_READ_VERITY_METADATA:
	case FS_IOC_GETFSLABEL:
	case FS_IOC_SETFSLABEL:
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
	case F2FS_IOC_SEC_TRIM_FILE:
	case F2FS_IOC_GET_COMPRESS_OPTION:
	case F2FS_IOC_SET_COMPRESS_OPTION:
	case F2FS_IOC_DECOMPRESS_FILE:
	case F2FS_IOC_COMPRESS_FILE:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= f2fs_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fadvise	= f2fs_file_fadvise,
};