blob: 6afd4562335fc0a3e6beb67d9ed6d041fffa19d9 [file] [log] [blame]
Chao Yu7c1a0002018-09-12 09:16:07 +08001// SPDX-License-Identifier: GPL-2.0
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09002/*
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09003 * fs/f2fs/file.c
4 *
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09007 */
8#include <linux/fs.h>
9#include <linux/f2fs_fs.h>
10#include <linux/stat.h>
11#include <linux/buffer_head.h>
12#include <linux/writeback.h>
Jaegeuk Kimae51fb32013-03-16 11:13:04 +090013#include <linux/blkdev.h>
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +090014#include <linux/falloc.h>
15#include <linux/types.h>
Namjae Jeone9750822013-02-04 23:41:41 +090016#include <linux/compat.h>
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +090017#include <linux/uaccess.h>
18#include <linux/mount.h>
Jaegeuk Kim7f7670f2014-04-28 18:12:36 +090019#include <linux/pagevec.h>
Jaegeuk Kimdc91de72017-01-13 13:12:29 -080020#include <linux/uio.h>
Andy Shevchenko8da4b8c2016-05-20 17:01:00 -070021#include <linux/uuid.h>
Jaegeuk Kim4dd6f972016-07-08 15:16:47 -070022#include <linux/file.h>
Chao Yu4507847c2019-07-17 17:06:11 +080023#include <linux/nls.h>
Daeho Jeong9af84642020-07-21 12:21:11 +090024#include <linux/sched/signal.h>
Miklos Szeredi9b1bb012021-04-07 14:36:43 +020025#include <linux/fileattr.h>
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +090026
27#include "f2fs.h"
28#include "node.h"
29#include "segment.h"
30#include "xattr.h"
31#include "acl.h"
Chao Yuc1c1b582015-07-10 18:08:10 +080032#include "gc.h"
Namjae Jeona2a4a7e2013-04-20 01:28:40 +090033#include <trace/events/f2fs.h>
Chao Yufa4320c2020-11-02 14:21:31 +080034#include <uapi/linux/f2fs.h>
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +090035
Souptick Joarderea4d4792018-04-15 01:40:02 +053036static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
Qiuyang Sun5a3a2d82017-05-18 11:06:45 +080037{
38 struct inode *inode = file_inode(vmf->vma->vm_file);
Souptick Joarderea4d4792018-04-15 01:40:02 +053039 vm_fault_t ret;
Qiuyang Sun5a3a2d82017-05-18 11:06:45 +080040
41 down_read(&F2FS_I(inode)->i_mmap_sem);
Souptick Joarderea4d4792018-04-15 01:40:02 +053042 ret = filemap_fault(vmf);
Qiuyang Sun5a3a2d82017-05-18 11:06:45 +080043 up_read(&F2FS_I(inode)->i_mmap_sem);
44
Chao Yu8b83ac82020-04-16 18:16:56 +080045 if (!ret)
46 f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
47 F2FS_BLKSIZE);
48
Chao Yud7648342019-04-15 15:22:19 +080049 trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
50
Souptick Joarderea4d4792018-04-15 01:40:02 +053051 return ret;
Qiuyang Sun5a3a2d82017-05-18 11:06:45 +080052}
53
/*
 * Write-fault handler (page_mkwrite): before a mmapped page may be
 * written, make sure a block backs it and mark the page dirty so
 * writeback can find it.  For compressed files whose cluster already
 * exists, no new allocation is needed.  The resulting error code is
 * translated to a VM_FAULT_* value by block_page_mkwrite_return().
 */
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;
	int err = 0;

	/* immutable or released-compressed inodes must not be dirtied */
	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		return VM_FAULT_SIGBUS;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto err;
	}

	/* inline data cannot be mmapped for write; move it out first */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		goto err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * If the page already belongs to a written compressed cluster,
	 * the blocks exist — skip allocation and only look up the dnode.
	 */
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < 0) {
			err = ret;
			goto err;
		} else if (ret) {
			need_alloc = false;
		}
	}
#endif
	/* should do out of any locked page */
	if (need_alloc)
		f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	/* page may have been truncated or invalidated while unlocked */
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	if (need_alloc) {
		/* block allocation */
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_block(&dn, page->index);
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/* no allocation: still need dn.data_blkaddr for the wait below */
	if (!need_alloc) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
		f2fs_put_dnode(&dn);
	}
#endif
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		/* zero the tail beyond i_size so stale data never leaks */
		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}
168
/* mmap operations for regular f2fs files (installed by f2fs_file_mmap) */
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
174
Jaegeuk Kim354a3392013-06-14 08:52:35 +0900175static int get_parent_ino(struct inode *inode, nid_t *pino)
176{
177 struct dentry *dentry;
178
Eric Biggers84c9c2d2020-05-05 11:41:11 -0700179 /*
180 * Make sure to get the non-deleted alias. The alias associated with
181 * the open file descriptor being fsync()'ed may be deleted already.
182 */
183 dentry = d_find_alias(inode);
Jaegeuk Kim354a3392013-06-14 08:52:35 +0900184 if (!dentry)
185 return 0;
186
Jaegeuk Kimf0947e52013-07-22 22:12:56 +0900187 *pino = parent_ino(dentry);
188 dput(dentry);
Jaegeuk Kim354a3392013-06-14 08:52:35 +0900189 return 1;
190}
191
Chao Yua5fd5052017-11-06 22:51:45 +0800192static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
Chao Yu9d1589e2014-08-20 18:37:35 +0800193{
Jaegeuk Kim40813632014-09-02 15:31:18 -0700194 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Chao Yua5fd5052017-11-06 22:51:45 +0800195 enum cp_reason_type cp_reason = CP_NO_NEEDED;
Chao Yu9d1589e2014-08-20 18:37:35 +0800196
Chao Yua5fd5052017-11-06 22:51:45 +0800197 if (!S_ISREG(inode->i_mode))
198 cp_reason = CP_NON_REGULAR;
Chao Yu4c8ff702019-11-01 18:07:14 +0800199 else if (f2fs_compressed_file(inode))
200 cp_reason = CP_COMPRESSED;
Chao Yua5fd5052017-11-06 22:51:45 +0800201 else if (inode->i_nlink != 1)
202 cp_reason = CP_HARDLINK;
Jaegeuk Kimbbf156f2016-08-29 18:23:45 -0700203 else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
Chao Yua5fd5052017-11-06 22:51:45 +0800204 cp_reason = CP_SB_NEED_CP;
Chao Yu9d1589e2014-08-20 18:37:35 +0800205 else if (file_wrong_pino(inode))
Chao Yua5fd5052017-11-06 22:51:45 +0800206 cp_reason = CP_WRONG_PINO;
Chao Yu4d57b862018-05-30 00:20:41 +0800207 else if (!f2fs_space_for_roll_forward(sbi))
Chao Yua5fd5052017-11-06 22:51:45 +0800208 cp_reason = CP_NO_SPC_ROLL;
Chao Yu4d57b862018-05-30 00:20:41 +0800209 else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
Chao Yua5fd5052017-11-06 22:51:45 +0800210 cp_reason = CP_NODE_NEED_CP;
Jaegeuk Kimd5053a342014-10-30 22:47:03 -0700211 else if (test_opt(sbi, FASTBOOT))
Chao Yua5fd5052017-11-06 22:51:45 +0800212 cp_reason = CP_FASTBOOT_MODE;
Chao Yu63189b72018-03-08 14:22:56 +0800213 else if (F2FS_OPTION(sbi).active_logs == 2)
Chao Yua5fd5052017-11-06 22:51:45 +0800214 cp_reason = CP_SPEC_LOG_NUM;
Chao Yu63189b72018-03-08 14:22:56 +0800215 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
Chao Yu4d57b862018-05-30 00:20:41 +0800216 f2fs_need_dentry_mark(sbi, inode->i_ino) &&
217 f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
218 TRANS_DIR_INO))
Jaegeuk Kim0a007b972017-12-28 08:09:44 -0800219 cp_reason = CP_RECOVER_DIR;
Chao Yu9d1589e2014-08-20 18:37:35 +0800220
Chao Yua5fd5052017-11-06 22:51:45 +0800221 return cp_reason;
Chao Yu9d1589e2014-08-20 18:37:35 +0800222}
223
Changman Lee9c7bb702014-12-08 15:29:40 +0900224static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
225{
226 struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
227 bool ret = false;
228 /* But we need to avoid that there are some inode updates */
Chao Yu4d57b862018-05-30 00:20:41 +0800229 if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
Changman Lee9c7bb702014-12-08 15:29:40 +0900230 ret = true;
231 f2fs_put_page(i, 0);
232 return ret;
233}
234
Changman Lee51455b12014-12-08 15:29:41 +0900235static void try_to_fix_pino(struct inode *inode)
236{
237 struct f2fs_inode_info *fi = F2FS_I(inode);
238 nid_t pino;
239
240 down_write(&fi->i_sem);
Changman Lee51455b12014-12-08 15:29:41 +0900241 if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
242 get_parent_ino(inode, &pino)) {
Jaegeuk Kim205b9822016-05-20 09:52:20 -0700243 f2fs_i_pino_write(inode, pino);
Changman Lee51455b12014-12-08 15:29:41 +0900244 file_got_pino(inode);
Changman Lee51455b12014-12-08 15:29:41 +0900245 }
Jaegeuk Kimee6d1822016-05-20 16:32:49 -0700246 up_write(&fi->i_sem);
Changman Lee51455b12014-12-08 15:29:41 +0900247}
248
/*
 * Core of fsync()/fdatasync().  Flushes data pages, then either issues
 * a full checkpoint (when need_do_checkpoint() says roll-forward cannot
 * recover this inode) or syncs the inode's node pages for roll-forward
 * recovery, finally issuing a cache flush unless policy forbids it.
 *
 * @atomic: set for atomic-write commits; per the comment below, the
 *          node chain then already serializes node blocks, so the final
 *          node-writeback wait and the flush are skipped.
 * Returns 0 or a negative errno.
 */
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	/* nothing to persist on a read-only or checkpoint-disabled fs */
	if (unlikely(f2fs_readonly(inode->i_sb) ||
				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	/* loop back here whenever syncing dirtied the inode block again */
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * node chain which serializes node blocks. If one of node writes are
	 * reordered, we can see simply broken chain, resulting in stopping
	 * roll-forward recovery. It means we'll recover all or none node blocks
	 * given fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to tack this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	return ret;
}
376
Jaegeuk Kim608514d2016-04-15 09:43:17 -0700377int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
378{
Jaegeuk Kim1f227a32017-10-23 23:48:49 +0200379 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
380 return -EIO;
Jaegeuk Kim608514d2016-04-15 09:43:17 -0700381 return f2fs_do_sync_file(file, start, end, datasync, false);
382}
383
Matthew Wilcox (Oracle)4cb03fe2020-08-24 22:48:41 +0100384static bool __found_offset(struct address_space *mapping, block_t blkaddr,
385 pgoff_t index, int whence)
Jaegeuk Kim7f7670f2014-04-28 18:12:36 +0900386{
387 switch (whence) {
388 case SEEK_DATA:
Matthew Wilcox (Oracle)4cb03fe2020-08-24 22:48:41 +0100389 if (__is_valid_data_blkaddr(blkaddr))
390 return true;
391 if (blkaddr == NEW_ADDR &&
392 xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
Jaegeuk Kim7f7670f2014-04-28 18:12:36 +0900393 return true;
394 break;
395 case SEEK_HOLE:
396 if (blkaddr == NULL_ADDR)
397 return true;
398 break;
399 }
400 return false;
401}
402
/*
 * Implement SEEK_DATA/SEEK_HOLE by walking the inode's dnode blocks
 * from @offset, checking each block address with __found_offset().
 * Returns the new file position via vfs_setpos(), or -ENXIO when no
 * matching offset exists before EOF.
 */
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case: everything up to isize is data */
	if (f2fs_has_inline_data(inode)) {
		if (whence == SEEK_HOLE) {
			data_ofs = isize;
			goto found;
		} else if (whence == SEEK_DATA) {
			data_ofs = offset;
			goto found;
		}
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	/* outer loop: one iteration per dnode block */
	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exists */
			if (whence == SEEK_DATA) {
				/* skip the whole missing node's range */
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				/* a missing node is one big hole */
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = f2fs_data_blkaddr(&dn);

			/* corrupted block address: bail out */
			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(file->f_mapping, blkaddr,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	/* SEEK_HOLE never reports a position past EOF */
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}
484
485static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
486{
487 struct inode *inode = file->f_mapping->host;
488 loff_t maxbytes = inode->i_sb->s_maxbytes;
489
Chengguang Xu6d1451b2021-01-13 13:21:54 +0800490 if (f2fs_compressed_file(inode))
491 maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
492
Chao Yu267378d2014-04-23 14:10:24 +0800493 switch (whence) {
494 case SEEK_SET:
495 case SEEK_CUR:
496 case SEEK_END:
497 return generic_file_llseek_size(file, offset, whence,
498 maxbytes, i_size_read(inode));
499 case SEEK_DATA:
500 case SEEK_HOLE:
Jaegeuk Kim0b4c5afde2014-09-08 10:59:43 -0700501 if (offset < 0)
502 return -ENXIO;
Chao Yu267378d2014-04-23 14:10:24 +0800503 return f2fs_seek_block(file, offset, whence);
504 }
505
506 return -EINVAL;
507}
508
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +0900509static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
510{
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -0700511 struct inode *inode = file_inode(file);
512
Jaegeuk Kim1f227a32017-10-23 23:48:49 +0200513 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
514 return -EIO;
515
Chao Yu4c8ff702019-11-01 18:07:14 +0800516 if (!f2fs_is_compress_backend_ready(inode))
517 return -EOPNOTSUPP;
518
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +0900519 file_accessed(file);
520 vma->vm_ops = &f2fs_file_vm_ops;
Chao Yu4c8ff702019-11-01 18:07:14 +0800521 set_inode_flag(inode, FI_MMAP_FILE);
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +0900522 return 0;
523}
524
Jaegeuk Kimfcc85a42015-04-21 20:39:58 -0700525static int f2fs_file_open(struct inode *inode, struct file *filp)
526{
Eric Biggers2e168c82017-11-29 12:35:28 -0800527 int err = fscrypt_file_open(inode, filp);
Jaegeuk Kimfcc85a42015-04-21 20:39:58 -0700528
Eric Biggers2e168c82017-11-29 12:35:28 -0800529 if (err)
530 return err;
Hyunchul Leeb91050a2018-03-08 19:34:38 +0900531
Chao Yu4c8ff702019-11-01 18:07:14 +0800532 if (!f2fs_is_compress_backend_ready(inode))
533 return -EOPNOTSUPP;
534
Eric Biggers95ae2512019-07-22 09:26:24 -0700535 err = fsverity_file_open(inode, filp);
536 if (err)
537 return err;
538
Hyunchul Leeb91050a2018-03-08 19:34:38 +0900539 filp->f_mode |= FMODE_NOWAIT;
540
Chao Yu0abd6752017-07-09 00:13:07 +0800541 return dquot_file_open(inode, filp);
Jaegeuk Kimfcc85a42015-04-21 20:39:58 -0700542}
543
/*
 * Invalidate @count block addresses in the dnode starting at
 * dn->ofs_in_node: zero each slot, invalidate the blocks, maintain the
 * compressed-block count per cluster, then update the extent cache and
 * the inode's valid-block count for everything actually freed.
 */
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	/* compressed blocks already released: don't count them as freed */
	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

	/* inline-xattr inodes keep block addresses past the extra area */
	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts with cluster */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		/* at each cluster boundary, flush the previous cluster's
		 * compressed-block tally and re-detect compression */
		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);
			valid_blocks = 0;
		}

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			/* skip corrupted addresses instead of freeing them */
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))
				continue;
			if (compressed_cluster)
				valid_blocks++;
		}

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);

		if (!released || blkaddr != COMPRESS_ADDR)
			nr_free++;
	}

	/* flush the tally for the final (possibly partial) cluster */
	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);
}
618
/* Truncate every data block address held by this dnode. */
void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}
623
/*
 * Zero the tail of the page that straddles the new EOF at @from.
 * @cache_only: only touch a page already cached and uptodate (used
 * when the on-disk copy does not need rewriting); otherwise read the
 * page in and dirty it so the zeroed tail reaches disk.
 * Returns 0 on success (including "nothing to do") or a read error.
 */
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	/* page-aligned truncation with no cache work: nothing to zero */
	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		/* a hole (-ENOENT) needs no zeroing */
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
657
/*
 * Free all data blocks of @inode at and beyond byte offset @from.
 *
 * @lock: take f2fs_lock_op() around the metadata updates (callers that
 *	  already hold it pass false).
 *
 * Handles inline-data inodes, the blocks addressed directly from the
 * dnode containing @from, and everything past it via
 * f2fs_truncate_inode_blocks(); finally zeroes the partial page at @from.
 * Returns 0 or a negative errno.
 */
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	/* first block index whose whole content lies beyond @from */
	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	/* nothing mapped past EOF-capacity: only the partial page remains */
	if (free_from >= max_file_blocks(inode))
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		/* tell truncate_partial_data_page() to work cache-only */
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	/* free the remainder of the dnode (or inode block) holding @from */
	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the partial data page containing @from */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
722
/*
 * Truncate @inode's blocks beyond @from, honouring compression clusters.
 *
 * For a compressed file the bulk truncation is rounded up to the cluster
 * boundary, and the leftover partial cluster is handled separately by
 * f2fs_truncate_partial_cluster().  Without CONFIG_F2FS_FS_COMPRESSION
 * this is a plain wrapper around f2fs_do_truncate_blocks().
 */
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	u64 free_from = from;
	int err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * for compressed file, only support cluster size
	 * aligned truncation.
	 */
	if (f2fs_compressed_file(inode))
		free_from = round_up(from,
				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
#endif

	err = f2fs_do_truncate_blocks(inode, free_from, lock);
	if (err)
		return err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/* @from inside a cluster: free the tail of that cluster too */
	if (from != free_from) {
		err = f2fs_truncate_partial_cluster(inode, from, lock);
		if (err)
			return err;
	}
#endif

	return 0;
}
752
/*
 * Truncate @inode to its current i_size: drop blocks past i_size and
 * update the timestamps.  Only meaningful for regular files, directories
 * and symlinks; other inode types return 0 untouched.
 *
 * Returns 0 on success or a negative errno (-EIO on checkpoint error or
 * injected truncate fault).
 */
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	/* fault-injection hook for testing truncate error paths */
	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
		return -EIO;
	}

	/* quota must be initialized before block accounting changes */
	err = dquot_initialize(inode);
	if (err)
		return err;

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
790
/*
 * ->getattr: fill @stat for statx(2).
 *
 * Reports the creation time (STATX_BTIME) when the on-disk inode layout
 * carries i_crtime, maps f2fs/VFS inode flags onto STATX_ATTR_* bits,
 * and inflates st_blocks for inline data/dentries so small files don't
 * appear to occupy zero sectors.
 */
int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		 struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	/* btime only exists if the extra-attr area actually contains it */
	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	/* translate per-inode flags to statx attribute bits */
	flags = fi->i_flags;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	/* advertise which attribute bits this filesystem can report */
	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(&init_user_ns, inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}
837
#ifdef CONFIG_F2FS_FS_POSIX_ACL
/*
 * ACL-aware variant of setattr_copy(): identical field copying, except
 * that a mode change goes through set_acl_inode() so the new mode is
 * staged for the ACL code rather than written straight to i_mode.
 */
static void __setattr_copy(struct user_namespace *mnt_userns,
			   struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;
		kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);

		/* drop setgid unless the caller is in the group or privileged */
		if (!in_group_p(kgid) && !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
/* without POSIX ACL support the generic VFS helper suffices */
#define __setattr_copy setattr_copy
#endif
866
/*
 * ->setattr: apply the attribute changes in @attr to @dentry's inode.
 *
 * Ordering matters here:
 *  - permission/crypto/verity preparation runs before anything is changed;
 *  - uid/gid transfer happens together with dquot_transfer() under
 *    f2fs_lock_op() so quota and inode stay consistent;
 *  - a size change is performed under i_gc_rwsem[WRITE] + i_mmap_sem,
 *    and only shrinks call f2fs_truncate() (growth keeps existing blocks).
 *
 * Returns 0 on success or a negative errno.
 */
int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	/* append-only files may not change ownership, mode or explicit times */
	if (unlikely(IS_APPEND(inode) &&
			(attr->ia_valid & (ATTR_MODE | ATTR_UID |
				  ATTR_GID | ATTR_TIMES_SET))))
		return -EPERM;

	if ((attr->ia_valid & ATTR_SIZE) &&
		!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = setattr_prepare(&init_user_ns, dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	err = fsverity_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			/* quota may now disagree with the inode: flag for fsck */
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * should convert inline inode before i_size_write to
			 * keep smaller than inline_data size with inline flag.
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
		}

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		spin_lock(&F2FS_I(inode)->i_size_lock);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		spin_unlock(&F2FS_I(inode)->i_size_lock);
	}

	__setattr_copy(&init_user_ns, inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(&init_user_ns, inode, f2fs_get_inode_mode(inode));

		/* commit or discard the mode staged by __setattr_copy() */
		if (is_inode_flag_set(inode, FI_ACL_MODE)) {
			if (!err)
				inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}
984
/* inode operations for regular f2fs files */
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
	.listxattr	= f2fs_listxattr,
	.fiemap		= f2fs_fiemap,
	.fileattr_get	= f2fs_fileattr_get,
	.fileattr_set	= f2fs_fileattr_set,
};
995
Chao Yu63943282015-08-07 18:36:06 +0800996static int fill_zero(struct inode *inode, pgoff_t index,
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +0900997 loff_t start, loff_t len)
998{
Jaegeuk Kim40813632014-09-02 15:31:18 -0700999 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001000 struct page *page;
1001
1002 if (!len)
Chao Yu63943282015-08-07 18:36:06 +08001003 return 0;
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001004
Jaegeuk Kim2c4db1a2016-01-07 14:15:04 -08001005 f2fs_balance_fs(sbi, true);
Jaegeuk Kimbd43df02013-01-25 18:33:41 +09001006
Gu Zhenge4795562013-09-27 18:08:30 +08001007 f2fs_lock_op(sbi);
Chao Yu4d57b862018-05-30 00:20:41 +08001008 page = f2fs_get_new_data_page(inode, NULL, index, false);
Gu Zhenge4795562013-09-27 18:08:30 +08001009 f2fs_unlock_op(sbi);
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001010
Chao Yu63943282015-08-07 18:36:06 +08001011 if (IS_ERR(page))
1012 return PTR_ERR(page);
1013
Chao Yubae0ee72018-12-25 17:43:42 +08001014 f2fs_wait_on_page_writeback(page, DATA, true, true);
Chao Yu63943282015-08-07 18:36:06 +08001015 zero_user(page, start, len);
1016 set_page_dirty(page);
1017 f2fs_put_page(page, 1);
1018 return 0;
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001019}
1020
/*
 * Free all allocated data blocks of @inode in the page range
 * [@pg_start, @pg_end).  Already-missing dnodes (holes) are skipped by
 * jumping ahead with f2fs_get_next_page_offset().
 *
 * Returns 0 on success or a negative errno from the dnode lookup.
 */
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				/* hole: advance past the missing dnode */
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		/* free as many slots as this dnode covers within the range */
		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}
1052
/*
 * FALLOC_FL_PUNCH_HOLE implementation: deallocate the byte range
 * [@offset, @offset + @len) of @inode so it reads back as zeroes.
 *
 * Partial head/tail pages are zeroed in place with fill_zero(); fully
 * covered pages are dropped from the page cache and their blocks freed
 * via f2fs_truncate_hole() under i_gc_rwsem[WRITE] + i_mmap_sem.
 */
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	/* inline data cannot be hole-punched; switch to regular layout */
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		/* range lies inside one page: just zero that span */
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		/* zero the partial head page, if any */
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		/* zero the partial tail page, if any */
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}
1114
/*
 * Record the block addresses of @inode's pages [@off, @off + @len) into
 * @blkaddr[].  Non-checkpointed (not yet persisted) blocks are detached
 * from the dnode (slot set to NULL_ADDR) and flagged in @do_replace[] so
 * __clone_blkaddrs() can move them and __roll_back_blkaddrs() can undo
 * on failure.
 *
 * Returns 0, -ENOENT when the whole range is a hole at level 0,
 * -EFSCORRUPTED on an invalid on-disk address, or -EOPNOTSUPP when a
 * non-checkpointed block is met in LFS mode (no in-place moves there).
 */
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		/* missing dnode: skip its slots (left zeroed by the caller) */
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
						dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = f2fs_data_blkaddr(&dn);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			return -EFSCORRUPTED;
		}

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (f2fs_lfs_mode(sbi)) {
				f2fs_put_dnode(&dn);
				return -EOPNOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001169
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001170static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1171 int *do_replace, pgoff_t off, int len)
1172{
1173 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1174 struct dnode_of_data dn;
1175 int ret, i;
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001176
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001177 for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1178 if (*do_replace == 0)
1179 continue;
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001180
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001181 set_new_dnode(&dn, inode, NULL, NULL, 0);
Chao Yu4d57b862018-05-30 00:20:41 +08001182 ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001183 if (ret) {
1184 dec_valid_block_count(sbi, inode, 1);
Chao Yu4d57b862018-05-30 00:20:41 +08001185 f2fs_invalidate_blocks(sbi, *blkaddr);
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001186 } else {
1187 f2fs_update_data_blkaddr(&dn, *blkaddr);
Jaegeuk Kim36abef42016-06-03 19:29:38 -07001188 }
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001189 f2fs_put_dnode(&dn);
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001190 }
1191 return 0;
Jaegeuk Kim0a2aa8f2016-07-08 17:42:21 -07001192}
Jaegeuk Kim6e2c64a2015-10-07 12:28:41 -07001193
/*
 * Wire the blocks captured in @blkaddr[]/@do_replace[] (from
 * __read_out_blkaddrs() on @src_inode pages [@src, @src + @len)) into
 * @dst_inode at [@dst, @dst + @len).
 *
 * Detached blocks (do_replace) and holes are moved/created by editing
 * dnode entries directly via f2fs_replace_block(); checkpointed blocks
 * are instead copied page-by-page and the source freed.  @full forces
 * holes to be materialised in the destination as well.
 *
 * Returns 0 or a negative errno; on error the caller rolls back.
 */
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
				block_t *blkaddr, int *do_replace,
				pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		/* a hole needs no work unless @full asks to materialise it */
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			/* batch consecutive movable slots within this dnode */
			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				/* free whatever the destination slot held */
				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					/* transfer block accounting src -> dst */
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			/* checkpointed block: copy the page, then free source */
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}
1277
/*
 * Move @len pages of data from @src_inode[@src..] to @dst_inode[@dst..],
 * processing in chunks of 4 * ADDRS_PER_BLOCK() slots.  Each chunk first
 * reads out (and detaches) the source block addresses, then clones them
 * into the destination; a failure in either step restores the source via
 * __roll_back_blkaddrs() for the current chunk (earlier chunks stay moved).
 *
 * @full is forwarded to __clone_blkaddrs() to materialise holes.
 * Returns 0, -ENOMEM on allocation failure, or the clone/read error.
 */
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

		/* zeroed scratch arrays: one address + one flag per slot */
		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_NOFS);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_NOFS);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	/* restore the source addresses detached in this chunk */
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}
1329
/*
 * Core of FALLOC_FL_COLLAPSE_RANGE: shift all blocks after the collapsed
 * range down by exchanging them with the range's start, under the gc rwsem,
 * i_mmap_sem and a filesystem transaction.
 *
 * Lock order here (i_gc_rwsem[WRITE] -> i_mmap_sem -> lock_op) must not be
 * changed; it mirrors the rest of this file.
 *
 * Returns 0 on success or a negative errno from __exchange_data_block.
 */
static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_lock_op(sbi);
	/* cached extents become stale once blocks start moving */
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}
1354
/*
 * Implement FALLOC_FL_COLLAPSE_RANGE: remove the byte range [offset,
 * offset+len) and shift the rest of the file down, shrinking i_size by @len.
 *
 * The range must be block-aligned and must not reach EOF.  Caller holds
 * the inode lock (see f2fs_fallocate).
 *
 * Returns 0 on success or a negative errno.
 */
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	/* collapsing a range that touches or passes EOF is not allowed */
	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	/* drop the now-duplicated tail blocks and publish the new size */
	new_size = i_size_read(inode) - len;
	ret = f2fs_truncate_blocks(inode, new_size, true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
1392
/*
 * Zero the block addresses in [start, end) covered by the dnode @dn:
 * reserve new blocks for unallocated slots, and replace already-allocated
 * blocks with NEW_ADDR (invalidating the old address) so they read as zero.
 *
 * Caller must hold f2fs_lock_op() and have located @dn at @start.
 *
 * Returns 0 on success, -ENOSPC if reservation could not cover every slot,
 * or an error from f2fs_reserve_new_blocks.
 */
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	/* first pass: count holes that need new block reservations */
	for (; index < end; index++, dn->ofs_in_node++) {
		if (f2fs_data_blkaddr(dn) == NULL_ADDR)
			count++;
	}

	/* rewind to @start before reserving */
	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	/* rewind again: f2fs_reserve_new_blocks advances ofs_in_node */
	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = f2fs_data_blkaddr(dn);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			/* free the old block and mark the slot preallocated */
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}
	}

	/* index - start == number of slots actually processed */
	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}
1434
/*
 * Implement FALLOC_FL_ZERO_RANGE: make the byte range [offset, offset+len)
 * read back as zeroes.  Partial pages at the edges are zeroed in place via
 * fill_zero(); whole pages in between have their blocks converted to
 * NEW_ADDR by f2fs_do_zero_range().
 *
 * If the range extends the file, i_size is updated unless
 * FALLOC_FL_KEEP_SIZE is set (then only FI_KEEP_ISIZE is flagged).
 * Caller holds the inode lock.
 *
 * Returns 0 on success or a negative errno.
 */
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* flush dirty data in the range before manipulating its blocks */
	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	/* sub-page offsets of the range boundaries */
	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		/* range fits inside a single page: zero it in place */
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		/* zero the partial head page, then start at the next page */
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		/* convert whole pages, one dnode's worth per iteration */
		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			/* lock order: gc rwsem -> i_mmap_sem -> lock_op */
			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				up_write(&F2FS_I(inode)->i_mmap_sem);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				goto out;
			}

			/* clamp to the last address this node block covers */
			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		/* zero the partial tail page */
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
	return ret;
}
1542
/*
 * Implement FALLOC_FL_INSERT_RANGE: open a @len-byte hole at @offset by
 * shifting all existing blocks from @offset upward, growing i_size by @len.
 * Blocks are moved from the end of the file toward @offset, @delta pages at
 * a time, so a move never overwrites blocks that still need to be read.
 *
 * The range must be block-aligned and @offset must be inside the file.
 * Caller holds the inode lock.
 *
 * Returns 0 on success or a negative errno.
 */
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	/* drop any preallocated blocks beyond i_size first */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;	/* shift distance in pages */
	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);
	truncate_pagecache(inode, offset);

	/* move blocks upward in chunks, walking from EOF back to pg_start */
	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
1615
/*
 * Default fallocate path (no PUNCH/COLLAPSE/ZERO/INSERT mode): preallocate
 * blocks for [offset, offset+len).  Pinned files are allocated one section
 * at a time from CURSEG_COLD_DATA_PINNED, running foreground GC when free
 * sections run low; regular files go through a single f2fs_map_blocks call.
 *
 * On partial failure, i_size is still advanced to cover whatever was
 * successfully allocated.  FALLOC_FL_KEEP_SIZE suppresses the i_size update
 * and sets FI_KEEP_ISIZE instead.  Caller holds the inode lock.
 *
 * Returns 0 on success or a negative errno.
 */
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	pgoff_t pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	block_t expanded = 0;	/* blocks successfully allocated so far */
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	if (off_end)
		map.m_len++;	/* partial tail page needs a block too */

	if (!map.m_len)
		return 0;

	if (f2fs_is_pinned_file(inode)) {
		block_t sec_blks = BLKS_PER_SEC(sbi);
		block_t sec_len = roundup(map.m_len, sec_blks);

		/* allocate one full section per iteration */
		map.m_len = sec_blks;
next_alloc:
		/* reclaim space first if free sections are nearly exhausted */
		if (has_not_enough_free_secs(sbi, 0,
			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
			down_write(&sbi->gc_lock);
			err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
			if (err && err != -ENODATA && err != -EAGAIN)
				goto out_err;
		}

		down_write(&sbi->pin_sem);

		f2fs_lock_op(sbi);
		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
		f2fs_unlock_op(sbi);

		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);

		up_write(&sbi->pin_sem);

		expanded += map.m_len;
		sec_len -= map.m_len;
		map.m_lblk += map.m_len;
		if (!err && sec_len)
			goto next_alloc;

		map.m_len = expanded;
	} else {
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
		expanded = map.m_len;
	}
out_err:
	if (err) {
		pgoff_t last_off;

		if (!expanded)
			return err;

		last_off = pg_start + expanded - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}
1712
1713static long f2fs_fallocate(struct file *file, int mode,
1714 loff_t offset, loff_t len)
1715{
Al Viro6131ffa2013-02-27 16:59:05 -05001716 struct inode *inode = file_inode(file);
Taehee Yoo587c0a42015-04-21 15:59:12 +09001717 long ret = 0;
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001718
Jaegeuk Kim1f227a32017-10-23 23:48:49 +02001719 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1720 return -EIO;
Chao Yu00e09c02019-08-23 17:58:36 +08001721 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1722 return -ENOSPC;
Chao Yu4c8ff702019-11-01 18:07:14 +08001723 if (!f2fs_is_compress_backend_ready(inode))
1724 return -EOPNOTSUPP;
Jaegeuk Kim1f227a32017-10-23 23:48:49 +02001725
Chao Yuc9980122015-09-11 14:39:02 +08001726 /* f2fs only support ->fallocate for regular file */
1727 if (!S_ISREG(inode->i_mode))
1728 return -EINVAL;
1729
Chandan Rajendra62230e0d2018-12-12 15:20:11 +05301730 if (IS_ENCRYPTED(inode) &&
Chao Yuf62185d2015-05-28 19:16:57 +08001731 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
Jaegeuk Kimfcc85a42015-04-21 20:39:58 -07001732 return -EOPNOTSUPP;
1733
Chao Yu4c8ff702019-11-01 18:07:14 +08001734 if (f2fs_compressed_file(inode) &&
1735 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1736 FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1737 return -EOPNOTSUPP;
1738
Chao Yub4ace332015-05-06 13:09:46 +08001739 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
Chao Yuf62185d2015-05-28 19:16:57 +08001740 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1741 FALLOC_FL_INSERT_RANGE))
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001742 return -EOPNOTSUPP;
1743
Al Viro59551022016-01-22 15:40:57 -05001744 inode_lock(inode);
Chao Yu3375f692014-01-28 10:29:26 +08001745
Taehee Yoo587c0a42015-04-21 15:59:12 +09001746 if (mode & FALLOC_FL_PUNCH_HOLE) {
1747 if (offset >= inode->i_size)
1748 goto out;
1749
Chao Yua66c7b22013-11-22 16:52:50 +08001750 ret = punch_hole(inode, offset, len);
Chao Yub4ace332015-05-06 13:09:46 +08001751 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1752 ret = f2fs_collapse_range(inode, offset, len);
Chao Yu75cd4e02015-05-06 13:11:13 +08001753 } else if (mode & FALLOC_FL_ZERO_RANGE) {
1754 ret = f2fs_zero_range(inode, offset, len, mode);
Chao Yuf62185d2015-05-28 19:16:57 +08001755 } else if (mode & FALLOC_FL_INSERT_RANGE) {
1756 ret = f2fs_insert_range(inode, offset, len);
Chao Yub4ace332015-05-06 13:09:46 +08001757 } else {
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001758 ret = expand_inode_data(inode, offset, len, mode);
Chao Yub4ace332015-05-06 13:09:46 +08001759 }
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001760
Namjae Jeon3af60a42012-12-30 14:52:37 +09001761 if (!ret) {
Deepa Dinamani078cd822016-09-14 07:48:04 -07001762 inode->i_mtime = inode->i_ctime = current_time(inode);
Jaegeuk Kim7c457292016-10-14 11:51:23 -07001763 f2fs_mark_inode_dirty_sync(inode, false);
Jaegeuk Kimd0239e12016-01-08 16:57:48 -08001764 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
Namjae Jeon3af60a42012-12-30 14:52:37 +09001765 }
Chao Yu3375f692014-01-28 10:29:26 +08001766
Taehee Yoo587c0a42015-04-21 15:59:12 +09001767out:
Al Viro59551022016-01-22 15:40:57 -05001768 inode_unlock(inode);
Chao Yu3375f692014-01-28 10:29:26 +08001769
Namjae Jeonc01e2852013-04-23 17:00:52 +09001770 trace_f2fs_fallocate(inode, mode, offset, len, ret);
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001771 return ret;
1772}
1773
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001774static int f2fs_release_file(struct inode *inode, struct file *filp)
1775{
Jaegeuk Kimde5307e2016-04-11 11:51:51 -07001776 /*
1777 * f2fs_relase_file is called at every close calls. So we should
1778 * not drop any inmemory pages by close called by other process.
1779 */
1780 if (!(filp->f_mode & FMODE_WRITE) ||
1781 atomic_read(&inode->i_writecount) != 1)
1782 return 0;
1783
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001784 /* some remained atomic pages should discarded */
1785 if (f2fs_is_atomic_file(inode))
Chao Yu4d57b862018-05-30 00:20:41 +08001786 f2fs_drop_inmem_pages(inode);
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001787 if (f2fs_is_volatile_file(inode)) {
Jaegeuk Kim91942322016-05-20 10:13:22 -07001788 set_inode_flag(inode, FI_DROP_CACHE);
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001789 filemap_fdatawrite(inode->i_mapping);
Jaegeuk Kim91942322016-05-20 10:13:22 -07001790 clear_inode_flag(inode, FI_DROP_CACHE);
Chao Yudfa74282018-06-04 23:20:51 +08001791 clear_inode_flag(inode, FI_VOLATILE_FILE);
1792 stat_dec_volatile_write(inode);
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001793 }
1794 return 0;
1795}
1796
Jaegeuk Kim7a10f012017-07-24 19:46:29 -07001797static int f2fs_file_flush(struct file *file, fl_owner_t id)
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001798{
Jaegeuk Kim7a10f012017-07-24 19:46:29 -07001799 struct inode *inode = file_inode(file);
1800
1801 /*
1802 * If the process doing a transaction is crashed, we should do
1803 * roll-back. Otherwise, other reader/write can see corrupted database
1804 * until all the writers close its file. Since this should be done
1805 * before dropping file lock, it needs to do in ->flush.
1806 */
1807 if (f2fs_is_atomic_file(inode) &&
1808 F2FS_I(inode)->inmem_task == current)
Chao Yu4d57b862018-05-30 00:20:41 +08001809 f2fs_drop_inmem_pages(inode);
Jaegeuk Kim7a10f012017-07-24 19:46:29 -07001810 return 0;
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001811}
1812
/*
 * Apply new on-disk i_flags to the inode.  Only the bits in @mask are
 * changed; bits outside @mask keep their current value.  Validates the
 * transitions that have side conditions (casefold requires an empty dir,
 * compression flags require sb support and may need the file converted).
 *
 * Returns 0 on success or a negative errno; on error no flags are changed.
 */
static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 masked_flags = fi->i_flags & mask;	/* current values of the bits being set */

	/* mask can be shrunk by flags_valid selector */
	iflags &= mask;

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	/* toggling casefold: need sb feature and an empty directory */
	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if (!f2fs_empty_dir(inode))
			return -ENOTEMPTY;
	}

	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		/* COMPR and NOCOMP are mutually exclusive */
		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
			return -EINVAL;
	}

	/* toggling the compression flag */
	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
		if (masked_flags & F2FS_COMPR_FL) {
			/* turning compression off requires converting the file */
			if (!f2fs_disable_compressed_file(inode))
				return -EINVAL;
		}
		if (iflags & F2FS_NOCOMP_FL)
			return -EINVAL;
		if (iflags & F2FS_COMPR_FL) {
			if (!f2fs_may_compress(inode))
				return -EINVAL;
			/* cannot enable compression on a non-empty regular file */
			if (S_ISREG(inode->i_mode) && inode->i_size)
				return -EINVAL;

			set_compress_context(inode);
		}
	}
	/* setting NOCOMP while COMPR is currently set is invalid */
	if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
		if (masked_flags & F2FS_COMPR_FL)
			return -EINVAL;
	}

	/* commit: replace only the masked bits */
	fi->i_flags = iflags | (fi->i_flags & ~mask);
	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
					(fi->i_flags & F2FS_NOCOMP_FL));

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}
1874
/* FS_IOC_[GS]ETFLAGS and FS_IOC_FS[GS]ETXATTR support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 *
 * Translating flags to fsx_flags value used by FS_IOC_FSGETXATTR and
 * FS_IOC_FSSETXATTR is done by the VFS.
 */

/* pairs of on-disk F2FS_*_FL bits and their generic FS_*_FL equivalents */
static const struct {
	u32 iflag;
	u32 fsflag;
} f2fs_fsflags_map[] = {
	{ F2FS_COMPR_FL,	FS_COMPR_FL },
	{ F2FS_SYNC_FL,		FS_SYNC_FL },
	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
	{ F2FS_APPEND_FL,	FS_APPEND_FL },
	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
	{ F2FS_INDEX_FL,	FS_INDEX_FL },
	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
};

/* flags reported by FS_IOC_GETFLAGS (superset of the settable ones) */
#define F2FS_GETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_INDEX_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_ENCRYPT_FL |		\
		FS_INLINE_DATA_FL |	\
		FS_NOCOW_FL |		\
		FS_VERITY_FL |		\
		FS_CASEFOLD_FL)

/* flags accepted by FS_IOC_SETFLAGS */
#define F2FS_SETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_CASEFOLD_FL)
Eric Biggers36098552019-06-04 22:59:04 -07001932
1933/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1934static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1935{
1936 u32 fsflags = 0;
1937 int i;
1938
1939 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1940 if (iflags & f2fs_fsflags_map[i].iflag)
1941 fsflags |= f2fs_fsflags_map[i].fsflag;
1942
1943 return fsflags;
1944}
1945
1946/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1947static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1948{
1949 u32 iflags = 0;
1950 int i;
1951
1952 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1953 if (fsflags & f2fs_fsflags_map[i].fsflag)
1954 iflags |= f2fs_fsflags_map[i].iflag;
1955
1956 return iflags;
1957}
1958
Chao Yud49f3e82015-01-23 20:36:04 +08001959static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
1960{
1961 struct inode *inode = file_inode(filp);
1962
1963 return put_user(inode->i_generation, (int __user *)arg);
1964}
1965
/*
 * F2FS_IOC_START_ATOMIC_WRITE: mark a regular file so that subsequent
 * writes are staged in memory and become durable only on commit.
 * Returns 0 on success or a negative errno.  Locking order here is
 * inode_lock -> i_gc_rwsem[WRITE] -> inode_lock[ATOMIC_FILE] spinlock.
 */
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!inode_owner_or_capable(&init_user_ns, inode))
		return -EACCES;

	/* atomic writes are only supported on regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	/* O_DIRECT bypasses the page cache, incompatible with in-memory staging */
	if (filp->f_flags & O_DIRECT)
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	f2fs_disable_compressed_file(inode);

	if (f2fs_is_atomic_file(inode)) {
		/* already atomic; fail only if a revoke was requested meanwhile */
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	/* exclude GC while we flush and flip the inode into atomic mode */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/*
	 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
	 * f2fs_is_atomic_file.
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}

	/* register the inode on the per-sb atomic-file list under its spinlock */
	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(&fi->inmem_ilist))
		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
	sbi->atomic_files++;
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	/* add inode in inmem_list first and set atomic_file */
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
2034
/*
 * F2FS_IOC_COMMIT_ATOMIC_WRITE: make the staged atomic writes durable.
 * For an atomic file, flush the in-memory pages then fsync; on success
 * the in-memory state is dropped.  For a non-atomic file this degrades
 * to a plain fsync.  Volatile files are rejected.  Returns 0 or -errno.
 */
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(&init_user_ns, inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	/* committing is undefined for volatile files */
	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret)
			f2fs_drop_inmem_pages(inode);
	} else {
		/* not atomic: behave as an fsync with atomic=false semantics */
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	/* a pending revoke request overrides any earlier success */
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
2076
/*
 * F2FS_IOC_START_VOLATILE_WRITE: mark a regular file volatile, meaning
 * its data may be discarded instead of persisted.  Idempotent: a file
 * that is already volatile returns 0 without side effects.
 */
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(&init_user_ns, inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	/* already volatile: nothing to do, ret is 0 from mnt_want_write_file */
	if (f2fs_is_volatile_file(inode))
		goto out;

	/* inline data cannot be tracked per-page; convert it out first */
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
2111
/*
 * F2FS_IOC_RELEASE_VOLATILE_WRITE: discard a volatile file's data by
 * punching a hole over the first block (or truncating the partial page
 * when the first block was never written).  No-op for non-volatile files.
 */
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(&init_user_ns, inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	/* first block never written: only the partial page needs truncating */
	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
2140
/*
 * F2FS_IOC_ABORT_VOLATILE_WRITE: drop staged atomic pages and/or clear
 * volatile state.  For volatile files the inode is synced after the
 * flag is cleared; any pending atomic revoke request is also cleared.
 */
static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(&init_user_ns, inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	/* throw away any pages staged by a prior start_atomic_write */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
2171
/*
 * F2FS_IOC_SHUTDOWN: force the filesystem into a shutdown state.
 * The mode word read from userspace selects how much is synced first
 * (full bdev freeze, checkpoint only, nothing, meta flush, or a
 * checkpoint that additionally requests fsck on next mount).
 * mnt_want_write_file() is skipped for FULLSYNC because freeze_bdev()
 * itself takes write protection; an -EROFS failure is treated as
 * "already read-only" and still performs the shutdown.
 */
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret) {
			/* read-only fs: still honor the shutdown request */
			if (ret == -EROFS) {
				ret = 0;
				f2fs_stop_checkpoint(sbi, false);
				set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
				trace_f2fs_shutdown(sbi, in, ret);
			}
			return ret;
		}
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		/* freeze flushes everything; shutdown while frozen, then thaw */
		ret = freeze_bdev(sb->s_bdev);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		thaw_bdev(sb->s_bdev);
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		/* NEED_FSCK deliberately skips stopping GC/discard below */
		goto out;
	default:
		ret = -EINVAL;
		goto out;
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);

	trace_f2fs_shutdown(sbi, in, ret);

	return ret;
}
2252
/*
 * FITRIM: discard unused blocks in the given range.  The requested
 * minlen is clamped up to the device's discard granularity, and the
 * (possibly adjusted) range is copied back to userspace on success.
 */
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* the underlying device must actually support discard */
	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	/* never trim extents smaller than the device can discard */
	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	/* report back how much was actually trimmed */
	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
2288
Jaegeuk Kimf424f662015-04-20 15:19:06 -07002289static bool uuid_is_nonzero(__u8 u[16])
2290{
2291 int i;
2292
2293 for (i = 0; i < 16; i++)
2294 if (u[i])
2295 return true;
2296 return false;
2297}
2298
2299static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2300{
Jaegeuk Kimf424f662015-04-20 15:19:06 -07002301 struct inode *inode = file_inode(filp);
2302
Chao Yu7beb01f2018-10-24 18:34:26 +08002303 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
Chao Yuead710b2017-11-14 19:28:42 +08002304 return -EOPNOTSUPP;
2305
Jaegeuk Kimd0239e12016-01-08 16:57:48 -08002306 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
Chao Yu7fb17fe2016-05-09 19:56:32 +08002307
Eric Biggersdb717d82016-11-26 19:07:49 -05002308 return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
Jaegeuk Kimf424f662015-04-20 15:19:06 -07002309}
2310
2311static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2312{
Chao Yu7beb01f2018-10-24 18:34:26 +08002313 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
Chao Yuead710b2017-11-14 19:28:42 +08002314 return -EOPNOTSUPP;
Eric Biggersdb717d82016-11-26 19:07:49 -05002315 return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
Jaegeuk Kimf424f662015-04-20 15:19:06 -07002316}
2317
/*
 * F2FS_IOC_GET_ENCRYPTION_PWSALT: return the 16-byte password salt stored
 * in the superblock, generating and persisting one first if it is still
 * all-zero.  sb_lock serializes concurrent salt generation; on a failed
 * superblock commit the freshly generated salt is wiped again.
 */
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(sbi))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	/* salt already generated by a previous call: just copy it out */
	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}
2354
Eric Biggers8ce589c2019-08-04 19:35:48 -07002355static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2356 unsigned long arg)
2357{
2358 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2359 return -EOPNOTSUPP;
2360
2361 return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2362}
2363
2364static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2365{
2366 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2367 return -EOPNOTSUPP;
2368
2369 return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2370}
2371
2372static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2373{
2374 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2375 return -EOPNOTSUPP;
2376
2377 return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2378}
2379
2380static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2381 unsigned long arg)
2382{
2383 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2384 return -EOPNOTSUPP;
2385
2386 return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2387}
2388
2389static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2390 unsigned long arg)
2391{
2392 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2393 return -EOPNOTSUPP;
2394
2395 return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2396}
2397
Eric Biggersee446e12020-03-14 13:50:51 -07002398static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2399{
2400 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2401 return -EOPNOTSUPP;
2402
2403 return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2404}
2405
/*
 * F2FS_IOC_GARBAGE_COLLECT: trigger one round of garbage collection.
 * The __u32 argument selects sync (block until gc_lock is available)
 * vs. async (trylock, returning -EBUSY if GC is already running).
 */
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		/* async request: don't wait if GC is already in progress */
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		down_write(&sbi->gc_lock);
	}

	/* f2fs_gc() releases gc_lock internally */
	ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}
2440
/*
 * Run garbage collection over a block range, one section at a time.
 * Validates that the range lies within the main area, then repeatedly
 * GCs the section containing range->start, advancing by one section
 * per iteration.  range->sync selects blocking vs. trylock acquisition
 * of gc_lock (trylock failure yields -EBUSY); -EBUSY from f2fs_gc()
 * itself is translated to -EAGAIN for the caller.
 */
static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	/* reject wrap-around and ranges outside the main block area */
	end = range->start + range->len;
	if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
					end >= MAX_BLKADDR(sbi))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

do_more:
	if (!range->sync) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		down_write(&sbi->gc_lock);
	}

	/* f2fs_gc() releases gc_lock internally */
	ret = f2fs_gc(sbi, range->sync, true, false,
				GET_SEGNO(sbi, range->start));
	if (ret) {
		if (ret == -EBUSY)
			ret = -EAGAIN;
		goto out;
	}
	/* advance to the next section and retake the lock */
	range->start += BLKS_PER_SEC(sbi);
	if (range->start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}
2485
Chao Yu34178b1b2020-11-10 09:24:37 +08002486static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2487{
2488 struct f2fs_gc_range range;
2489
2490 if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2491 sizeof(range)))
2492 return -EFAULT;
2493 return __f2fs_ioc_gc_range(filp, &range);
2494}
2495
/*
 * F2FS_IOC_WRITE_CHECKPOINT: force a checkpoint via f2fs_sync_fs().
 * Rejected when the fs is read-only or checkpointing is disabled
 * (checkpoint=disable mount state).
 */
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
2522
/*
 * Defragment the file range described by @range.  Two passes:
 *   1) walk the block mapping to decide whether the range is fragmented
 *      (non-contiguous physical blocks) and count blocks to move;
 *   2) if fragmented and enough free sections exist for LFS allocation,
 *      redirty the mapped pages (up to one segment's worth at a time
 *      under FI_DO_DEFRAG) and write them back so they are reallocated
 *      contiguously.
 * On success range->len is updated to the number of bytes processed.
 * Returns 0, -EINVAL (IPU policy active), -EAGAIN (not enough free
 * sections), or another negative errno from mapping/writeback.
 */
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE,
					.m_may_create = false };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			/* hole: skip to the next mapped offset */
			map.m_lblk = next_pgofs;
			continue;
		}

		/* physical discontinuity with the previous extent */
		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of block that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented) {
		total = 0;
		goto out;
	}

	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));

	/*
	 * make sure there are enough free section for LFS allocation, this can
	 * avoid defragment running in SSR mode when free section are allocated
	 * intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	/* second pass: redirty pages so writeback reallocates them */
	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			goto check;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		/* dirty at most one segment's worth of pages per batch */
		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;
check:
		if (map.m_lblk < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		/* flush this batch before starting the next one */
		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
2670
/*
 * F2FS_IOC_DEFRAGMENT: validate the userspace request (regular file,
 * writable fs, block-aligned range inside the maximum file size) and
 * delegate to f2fs_defragment_range(), copying the updated range back.
 */
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* atomic files are staged in memory; defragmenting them is invalid */
	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					max_file_blocks(inode)))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	/* report back the number of bytes actually defragmented */
	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}
2716
/*
 * Exchange (move) a block-aligned range of data from @file_in to @file_out.
 * Backs the F2FS_IOC_MOVE_RANGE ioctl via __f2fs_ioc_move_range().
 *
 * Both files must be regular, unencrypted, and on the same f2fs mount.
 * Lock ordering: src inode lock -> dst inode trylock -> src i_gc_rwsem ->
 * dst i_gc_rwsem trylock -> f2fs_lock_op().  The trylocks avoid an ABBA
 * deadlock with a concurrent move in the opposite direction; contention is
 * reported to the caller as -EBUSY rather than waited out.
 */
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	/* cross-device moves are not supported */
	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
		return -EOPNOTSUPP;

	if (pos_out < 0 || pos_in < 0)
		return -EINVAL;

	if (src == dst) {
		/* moving a range onto itself is a no-op */
		if (pos_in == pos_out)
			return 0;
		/* overlapping in-file moves are rejected */
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		ret = -EBUSY;
		/* trylock to avoid ABBA deadlock with a reverse move */
		if (!inode_trylock(dst))
			goto out;
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	/* len == 0 means "to the end of the source file" */
	if (len == 0)
		olen = len = src->i_size - pos_in;
	/* round a range that ends at EOF up to a full block */
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	/* inline data cannot be exchanged block-wise; convert it first */
	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);

	/* exclude GC from touching either file's blocks during the swap */
	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
			goto out_src;
	}

	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		/* grow dst if the move extended it, else restore old size */
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);

	if (src != dst)
		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}
2831
Chao Yu34178b1b2020-11-10 09:24:37 +08002832static int __f2fs_ioc_move_range(struct file *filp,
2833 struct f2fs_move_range *range)
Jaegeuk Kim4dd6f972016-07-08 15:16:47 -07002834{
Jaegeuk Kim4dd6f972016-07-08 15:16:47 -07002835 struct fd dst;
2836 int err;
2837
2838 if (!(filp->f_mode & FMODE_READ) ||
2839 !(filp->f_mode & FMODE_WRITE))
2840 return -EBADF;
2841
Chao Yu34178b1b2020-11-10 09:24:37 +08002842 dst = fdget(range->dst_fd);
Jaegeuk Kim4dd6f972016-07-08 15:16:47 -07002843 if (!dst.file)
2844 return -EBADF;
2845
2846 if (!(dst.file->f_mode & FMODE_WRITE)) {
2847 err = -EBADF;
2848 goto err_out;
2849 }
2850
2851 err = mnt_want_write_file(filp);
2852 if (err)
2853 goto err_out;
2854
Chao Yu34178b1b2020-11-10 09:24:37 +08002855 err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2856 range->pos_out, range->len);
Jaegeuk Kim4dd6f972016-07-08 15:16:47 -07002857
2858 mnt_drop_write_file(filp);
Jaegeuk Kim4dd6f972016-07-08 15:16:47 -07002859err_out:
2860 fdput(dst);
2861 return err;
2862}
2863
Chao Yu34178b1b2020-11-10 09:24:37 +08002864static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2865{
2866 struct f2fs_move_range range;
2867
2868 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2869 sizeof(range)))
2870 return -EFAULT;
2871 return __f2fs_ioc_move_range(filp, &range);
2872}
2873
/*
 * F2FS_IOC_FLUSH_DEVICE: migrate up to range.segments segments off one
 * device of a multi-device filesystem by forcing foreground GC on them.
 * Resumes from sm->last_victim[FLUSH_DEVICE] so repeated calls walk the
 * device incrementally.  Requires CAP_SYS_ADMIN.
 */
static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	/* GC cannot run while checkpointing is disabled */
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return -EINVAL;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* only multi-device, small-section layouts support device flush */
	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
			__is_large_section(sbi)) {
		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	/* device 0 starts at segment 0; others start at their start_blk */
	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	/* continue from where the previous flush ioctl left off */
	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
		/*
		 * Push the other victim-selection hints past our window so
		 * regular GC does not pick segments we are about to flush.
		 */
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;	/* nothing to move in this segment */
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}
2936
Jaegeuk Kime65ef202017-07-21 12:58:59 -07002937static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2938{
2939 struct inode *inode = file_inode(filp);
2940 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2941
2942 /* Must validate to set it with SQLite behavior in Android. */
2943 sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2944
2945 return put_user(sb_feature, (u32 __user *)arg);
2946}
Jaegeuk Kime066b832017-04-13 15:17:00 -07002947
Chao Yu2c1d0302017-07-29 00:32:52 +08002948#ifdef CONFIG_QUOTA
Chao Yu78130812018-09-25 15:36:02 +08002949int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2950{
2951 struct dquot *transfer_to[MAXQUOTAS] = {};
2952 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2953 struct super_block *sb = sbi->sb;
2954 int err = 0;
2955
2956 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2957 if (!IS_ERR(transfer_to[PRJQUOTA])) {
2958 err = __dquot_transfer(inode, transfer_to);
2959 if (err)
2960 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2961 dqput(transfer_to[PRJQUOTA]);
2962 }
2963 return err;
2964}
2965
/*
 * Change the project id of @inode (fileattr FS_IOC_FSSETXATTR path).
 * Requires the project-quota feature and the extra-attr inode area;
 * returns 0 without change if the inode already has @projid.
 */
static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sbi)) {
		/* without the feature, only the default id is acceptable */
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	/* i_projid lives in the extra attribute area */
	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = -EPERM;
	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return err;

	/* verify the on-disk inode actually has room for i_projid */
	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		return err;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		return err;

	/* transfer usage and update the inode atomically w.r.t. checkpoint */
	f2fs_lock_op(sbi);
	err = f2fs_transfer_project_quota(inode, kprojid);
	if (err)
		goto out_unlock;

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	f2fs_unlock_op(sbi);
	return err;
}
3022#else
/* No-op stub when CONFIG_QUOTA is disabled: nothing to transfer. */
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	return 0;
}
3027
Miklos Szeredi9b1bb012021-04-07 14:36:43 +02003028static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
Chao Yu2c1d0302017-07-29 00:32:52 +08003029{
3030 if (projid != F2FS_DEF_PROJID)
3031 return -EOPNOTSUPP;
3032 return 0;
3033}
3034#endif
3035
Miklos Szeredi9b1bb012021-04-07 14:36:43 +02003036int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
Eric Biggers36098552019-06-04 22:59:04 -07003037{
Miklos Szeredi9b1bb012021-04-07 14:36:43 +02003038 struct inode *inode = d_inode(dentry);
Eric Biggers6fc93c42019-07-01 13:26:29 -07003039 struct f2fs_inode_info *fi = F2FS_I(inode);
Miklos Szeredi9b1bb012021-04-07 14:36:43 +02003040 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
Eric Biggers6fc93c42019-07-01 13:26:29 -07003041
Miklos Szeredi9b1bb012021-04-07 14:36:43 +02003042 if (IS_ENCRYPTED(inode))
3043 fsflags |= FS_ENCRYPT_FL;
3044 if (IS_VERITY(inode))
3045 fsflags |= FS_VERITY_FL;
3046 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
3047 fsflags |= FS_INLINE_DATA_FL;
3048 if (is_inode_flag_set(inode, FI_PIN_FILE))
3049 fsflags |= FS_NOCOW_FL;
3050
3051 fileattr_fill_flags(fa, fsflags & F2FS_GETTABLE_FS_FL);
Eric Biggers6fc93c42019-07-01 13:26:29 -07003052
3053 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3054 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
Eric Biggers6fc93c42019-07-01 13:26:29 -07003055
Chao Yu2c1d0302017-07-29 00:32:52 +08003056 return 0;
3057}
3058
/*
 * fileattr_set hook: apply FS_* flag changes (and, on success, the project
 * id) coming from FS_IOC_SETFLAGS / FS_IOC_FSSETXATTR.  Flags outside the
 * gettable set are rejected; flags that are gettable but not settable are
 * silently masked off.
 */
int f2fs_fileattr_set(struct user_namespace *mnt_userns,
		      struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
	u32 iflags;
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
		return -ENOSPC;
	if (fsflags & ~F2FS_GETTABLE_FS_FL)
		return -EOPNOTSUPP;
	fsflags &= F2FS_SETTABLE_FS_FL;
	/* old FS_IOC_SETFLAGS callers may only touch the common flag set */
	if (!fa->flags_valid)
		mask &= FS_COMMON_FL;

	iflags = f2fs_fsflags_to_iflags(fsflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask));
	if (!err)
		err = f2fs_ioc_setproject(inode, fa->fsx_projid);

	return err;
}
Jaegeuk Kim52656e62014-09-24 15:37:02 -07003087
/*
 * Track GC pressure on a pinned file.  When @inc, bump the pin-failure
 * counter; once the counter exceeds the per-sb threshold the pin is
 * dropped (so GC can move the file) and -EAGAIN is returned.
 */
int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures for normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode,
				fi->i_gc_failures[GC_FAILURE_PIN] + 1);

	/* too many failed GC attempts: give the file back to GC */
	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
		f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
			  __func__, inode->i_ino,
			  fi->i_gc_failures[GC_FAILURE_PIN]);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}
3107
/*
 * F2FS_IOC_SET_PIN_FILE: pin (or unpin, when the argument is 0) a regular
 * file so its blocks are not relocated by GC.  Pinning forces in-place
 * updates, so it is refused for files that must update out-of-place and
 * for compressed files that cannot be converted back.
 * Returns the current pin-failure count on successful pin.
 */
static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	/* pinning requires in-place updates to be safe for this inode */
	if (f2fs_should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		/* unpin: clear the flag and reset the failure counter */
		clear_inode_flag(inode, FI_PIN_FILE);
		f2fs_i_gc_failures_write(inode, 0);
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}

	/* inline data cannot stay in the inode of a pinned file */
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	/* a pinned file cannot remain compressed */
	if (!f2fs_disable_compressed_file(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
3163
3164static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3165{
3166 struct inode *inode = file_inode(filp);
3167 __u32 pin = 0;
3168
3169 if (is_inode_flag_set(inode, FI_PIN_FILE))
Chao Yu2ef79ec2018-05-07 20:28:54 +08003170 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
Jaegeuk Kim1ad71a22017-12-07 16:25:39 -08003171 return put_user(pin, (u32 __user *)arg);
3172}
3173
/*
 * Walk the whole file with F2FS_GET_BLOCK_PRECACHE lookups so the extent
 * cache is populated ahead of time.  Each iteration jumps to the start of
 * the next extent reported by f2fs_map_blocks().  Not supported for
 * inodes with FI_NO_EXTENT.
 */
int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	/* lookup only; never allocate blocks while precaching */
	map.m_may_create = false;
	end = max_file_blocks(inode);

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		/* keep GC from moving blocks out from under the lookup */
		down_write(&fi->i_gc_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		/* continue from the next extent's start offset */
		map.m_lblk = m_next_extent;
	}

	return 0;
}
3206
/* F2FS_IOC_PRECACHE_EXTENTS: thin ioctl wrapper for f2fs_precache_extents(). */
static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return f2fs_precache_extents(inode);
}
3211
Qiuyang Sun04f0b2e2019-06-05 11:33:25 +08003212static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3213{
3214 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3215 __u64 block_count;
Qiuyang Sun04f0b2e2019-06-05 11:33:25 +08003216
3217 if (!capable(CAP_SYS_ADMIN))
3218 return -EPERM;
3219
3220 if (f2fs_readonly(sbi->sb))
3221 return -EROFS;
3222
3223 if (copy_from_user(&block_count, (void __user *)arg,
3224 sizeof(block_count)))
3225 return -EFAULT;
3226
Jaegeuk Kimb4b10062020-03-31 11:43:07 -07003227 return f2fs_resize_fs(sbi, block_count);
Qiuyang Sun04f0b2e2019-06-05 11:33:25 +08003228}
3229
/*
 * FS_IOC_ENABLE_VERITY: enable fs-verity on this file via the common
 * fs/verity code.  Rejected when the superblock lacks the verity feature.
 */
static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
		f2fs_warn(F2FS_I_SB(inode),
			  "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
			  inode->i_ino);
		return -EOPNOTSUPP;
	}

	return fsverity_ioctl_enable(filp, (const void __user *)arg);
}
3245
3246static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3247{
3248 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3249 return -EOPNOTSUPP;
3250
3251 return fsverity_ioctl_measure(filp, (void __user *)arg);
3252}
3253
Eric Biggerse17fe652021-01-15 10:18:16 -08003254static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3255{
3256 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3257 return -EOPNOTSUPP;
3258
3259 return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3260}
3261
/*
 * FS_IOC_GETFSLABEL: convert the UTF-16LE volume name from the raw
 * superblock to UTF-8 and copy it to userspace.  sb_lock is held for
 * reading while the raw superblock is accessed.
 */
static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int count;
	int err = 0;

	vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
	if (!vbuf)
		return -ENOMEM;

	down_read(&sbi->sb_lock);
	count = utf16s_to_utf8s(sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name),
			UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
	up_read(&sbi->sb_lock);

	/* never copy more than the generic label ABI allows */
	if (copy_to_user((char __user *)arg, vbuf,
				min(FSLABEL_MAX, count)))
		err = -EFAULT;

	kfree(vbuf);
	return err;
}
3287
/*
 * FS_IOC_SETFSLABEL: replace the volume name in the raw superblock with
 * a UTF-16LE conversion of the user-supplied UTF-8 label and persist it
 * with f2fs_commit_super().  Admin-only; sb_lock is held for writing
 * around the superblock update.
 */
static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* bounded, NUL-terminated copy of the userspace label */
	vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
	if (IS_ERR(vbuf))
		return PTR_ERR(vbuf);

	err = mnt_want_write_file(filp);
	if (err)
		goto out;

	down_write(&sbi->sb_lock);

	/* clear the old name so a shorter label leaves no stale bytes */
	memset(sbi->raw_super->volume_name, 0,
			sizeof(sbi->raw_super->volume_name));
	utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
			sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name));

	err = f2fs_commit_super(sbi, false);

	up_write(&sbi->sb_lock);

	mnt_drop_write_file(filp);
out:
	kfree(vbuf);
	return err;
}
3323
Chao Yu439dfb12020-02-21 18:09:21 +08003324static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3325{
3326 struct inode *inode = file_inode(filp);
3327 __u64 blocks;
3328
3329 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3330 return -EOPNOTSUPP;
3331
3332 if (!f2fs_compressed_file(inode))
3333 return -EINVAL;
3334
Daeho Jeongc2759eb2020-09-08 11:44:10 +09003335 blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
Chao Yu439dfb12020-02-21 18:09:21 +08003336 return put_user(blocks, (u64 __user *)arg);
3337}
3338
/*
 * Release the reserved (NEW_ADDR) slots of @count block addresses starting
 * at dn->ofs_in_node, cluster by cluster.  @count must be a multiple of
 * the inode's cluster size.  Returns the number of blocks released, or a
 * negative errno.  Called with the dnode locked by the caller.
 */
static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int released_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	block_t blkaddr;
	int i;

	/* first pass: validate every on-disk address before touching any */
	for (i = 0; i < count; i++) {
		blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			continue;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE)))
			return -EFSCORRUPTED;
	}

	/* second pass: walk one cluster per iteration */
	while (count) {
		int compr_blocks = 0;

		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
			blkaddr = f2fs_data_blkaddr(dn);

			if (i == 0) {
				/* non-compressed cluster: skip it entirely */
				if (blkaddr == COMPRESS_ADDR)
					continue;
				dn->ofs_in_node += cluster_size;
				goto next;
			}

			if (__is_valid_data_blkaddr(blkaddr))
				compr_blocks++;

			if (blkaddr != NEW_ADDR)
				continue;

			/* drop the reserved slot */
			dn->data_blkaddr = NULL_ADDR;
			f2fs_set_data_blkaddr(dn);
		}

		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
		dec_valid_block_count(sbi, dn->inode,
					cluster_size - compr_blocks);

		released_blocks += cluster_size - compr_blocks;
next:
		count -= cluster_size;
	}

	return released_blocks;
}
3392
/*
 * F2FS_IOC_RELEASE_COMPRESS_BLOCKS: give the space saved by compression
 * back to the filesystem.  Marks the inode FI_COMPRESS_RELEASED (making it
 * effectively immutable for writes) and walks all dnodes releasing the
 * reserved slots.  Returns the released block count to userspace.
 */
static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int released_blocks = 0;
	int ret;
	int writecount;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	/* no other writer may have the file open while we release blocks */
	writecount = atomic_read(&inode->i_writecount);
	if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
			(!(filp->f_mode & FMODE_WRITE) && writecount)) {
		ret = -EBUSY;
		goto out;
	}

	/* already released once; releasing again makes no sense */
	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
		ret = -EINVAL;
		goto out;
	}

	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_COMPRESS_RELEASED);
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);

	/* nothing was ever compressed: flag set, no blocks to walk */
	if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
		goto out;

	/* exclude GC and mmap faults while block addresses change */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	while (page_idx < last_idx) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
		if (ret) {
			if (ret == -ENOENT) {
				/* hole: jump to the next allocated dnode */
				page_idx = f2fs_get_next_page_offset(&dn,
								page_idx);
				ret = 0;
				continue;
			}
			break;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
		/* release whole clusters only */
		count = round_up(count, F2FS_I(inode)->i_cluster_size);

		ret = release_compress_blocks(&dn, count);

		f2fs_put_dnode(&dn);

		if (ret < 0)
			break;

		page_idx += count;
		released_blocks += ret;
	}

	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	up_write(&F2FS_I(inode)->i_mmap_sem);
out:
	inode_unlock(inode);

	mnt_drop_write_file(filp);

	if (ret >= 0) {
		ret = put_user(released_blocks, (u64 __user *)arg);
	} else if (released_blocks &&
			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
		/* partially released then failed: metadata is inconsistent */
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
			"iblocks=%llu, released=%u, compr_blocks=%u, "
			"run fsck to fix.",
			__func__, inode->i_ino, inode->i_blocks,
			released_blocks,
			atomic_read(&F2FS_I(inode)->i_compr_blocks));
	}

	return ret;
}
3500
/*
 * Re-reserve (as NEW_ADDR) the block slots of @count addresses starting at
 * dn->ofs_in_node, cluster by cluster — the inverse of
 * release_compress_blocks().  @count must be a multiple of the cluster
 * size.  Returns the number of blocks reserved, or a negative errno
 * (-ENOSPC if the quota/space grant came back short).
 */
static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int reserved_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	block_t blkaddr;
	int i;

	/* first pass: validate every on-disk address before touching any */
	for (i = 0; i < count; i++) {
		blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			continue;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE)))
			return -EFSCORRUPTED;
	}

	/* second pass: walk one cluster per iteration */
	while (count) {
		int compr_blocks = 0;
		blkcnt_t reserved;
		int ret;

		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
			blkaddr = f2fs_data_blkaddr(dn);

			if (i == 0) {
				/* non-compressed cluster: skip it entirely */
				if (blkaddr == COMPRESS_ADDR)
					continue;
				dn->ofs_in_node += cluster_size;
				goto next;
			}

			/* blocks still backed by data need no reservation */
			if (__is_valid_data_blkaddr(blkaddr)) {
				compr_blocks++;
				continue;
			}

			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}

		reserved = cluster_size - compr_blocks;
		ret = inc_valid_block_count(sbi, dn->inode, &reserved);
		if (ret)
			return ret;

		/* a partial grant cannot be represented; treat as ENOSPC */
		if (reserved != cluster_size - compr_blocks)
			return -ENOSPC;

		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);

		reserved_blocks += reserved;
next:
		count -= cluster_size;
	}

	return reserved_blocks;
}
3561
3562static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3563{
3564 struct inode *inode = file_inode(filp);
3565 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3566 pgoff_t page_idx = 0, last_idx;
3567 unsigned int reserved_blocks = 0;
3568 int ret;
3569
3570 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3571 return -EOPNOTSUPP;
3572
3573 if (!f2fs_compressed_file(inode))
3574 return -EINVAL;
3575
3576 if (f2fs_readonly(sbi->sb))
3577 return -EROFS;
3578
3579 ret = mnt_want_write_file(filp);
3580 if (ret)
3581 return ret;
3582
Daeho Jeongc2759eb2020-09-08 11:44:10 +09003583 if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
Chao Yuc75488f2020-03-06 14:35:33 +08003584 goto out;
3585
3586 f2fs_balance_fs(F2FS_I_SB(inode), true);
3587
3588 inode_lock(inode);
3589
Jaegeuk Kimc6140412021-05-25 11:39:35 -07003590 if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
Chao Yuc75488f2020-03-06 14:35:33 +08003591 ret = -EINVAL;
3592 goto unlock_inode;
3593 }
3594
3595 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3596 down_write(&F2FS_I(inode)->i_mmap_sem);
3597
3598 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3599
3600 while (page_idx < last_idx) {
3601 struct dnode_of_data dn;
3602 pgoff_t end_offset, count;
3603
3604 set_new_dnode(&dn, inode, NULL, NULL, 0);
3605 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3606 if (ret) {
3607 if (ret == -ENOENT) {
3608 page_idx = f2fs_get_next_page_offset(&dn,
3609 page_idx);
3610 ret = 0;
3611 continue;
3612 }
3613 break;
3614 }
3615
3616 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3617 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
Chao Yu4fec3fc02020-04-08 19:55:17 +08003618 count = round_up(count, F2FS_I(inode)->i_cluster_size);
Chao Yuc75488f2020-03-06 14:35:33 +08003619
3620 ret = reserve_compress_blocks(&dn, count);
3621
3622 f2fs_put_dnode(&dn);
3623
3624 if (ret < 0)
3625 break;
3626
3627 page_idx += count;
3628 reserved_blocks += ret;
3629 }
3630
3631 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3632 up_write(&F2FS_I(inode)->i_mmap_sem);
3633
3634 if (ret >= 0) {
Jaegeuk Kimc6140412021-05-25 11:39:35 -07003635 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
Chao Yuc75488f2020-03-06 14:35:33 +08003636 inode->i_ctime = current_time(inode);
3637 f2fs_mark_inode_dirty_sync(inode, true);
3638 }
3639unlock_inode:
3640 inode_unlock(inode);
3641out:
3642 mnt_drop_write_file(filp);
3643
3644 if (ret >= 0) {
3645 ret = put_user(reserved_blocks, (u64 __user *)arg);
Daeho Jeongc2759eb2020-09-08 11:44:10 +09003646 } else if (reserved_blocks &&
3647 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
Chao Yuc75488f2020-03-06 14:35:33 +08003648 set_sbi_flag(sbi, SBI_NEED_FSCK);
3649 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
Daeho Jeongc2759eb2020-09-08 11:44:10 +09003650 "iblocks=%llu, reserved=%u, compr_blocks=%u, "
Chao Yuc75488f2020-03-06 14:35:33 +08003651 "run fsck to fix.",
3652 __func__, inode->i_ino, inode->i_blocks,
3653 reserved_blocks,
Daeho Jeongc2759eb2020-09-08 11:44:10 +09003654 atomic_read(&F2FS_I(inode)->i_compr_blocks));
Chao Yuc75488f2020-03-06 14:35:33 +08003655 }
3656
3657 return ret;
3658}
3659
Daeho Jeong9af84642020-07-21 12:21:11 +09003660static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3661 pgoff_t off, block_t block, block_t len, u32 flags)
3662{
3663 struct request_queue *q = bdev_get_queue(bdev);
3664 sector_t sector = SECTOR_FROM_BLOCK(block);
3665 sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3666 int ret = 0;
3667
3668 if (!q)
3669 return -ENXIO;
3670
3671 if (flags & F2FS_TRIM_FILE_DISCARD)
3672 ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3673 blk_queue_secure_erase(q) ?
3674 BLKDEV_DISCARD_SECURE : 0);
3675
3676 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3677 if (IS_ENCRYPTED(inode))
3678 ret = fscrypt_zeroout_range(inode, off, block, len);
3679 else
3680 ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3681 GFP_NOFS, 0);
3682 }
3683
3684 return ret;
3685}
3686
/*
 * F2FS_IOC_SEC_TRIM_FILE: securely erase (discard and/or zero out) the
 * on-disk blocks backing a byte range of a regular file.
 *
 * Copies a struct f2fs_sectrim_range from userspace, validates the flags
 * and alignment, then walks the file's dnodes merging physically
 * contiguous blocks on the same device into maximal extents, handing each
 * extent to f2fs_secure_erase().  Runs under inode_lock plus write-locked
 * i_gc_rwsem[WRITE] and i_mmap_sem so GC and mmap writers cannot move or
 * redirty the blocks mid-erase.
 */
static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct block_device *prev_bdev = NULL;
	struct f2fs_sectrim_range range;
	pgoff_t index, pg_end, prev_index = 0;
	block_t prev_block = 0, len = 0;	/* current merged extent */
	loff_t end_addr;
	bool to_end = false;	/* true: erase through EOF (or s_maxbytes) */
	int ret = 0;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	/* at least one of DISCARD/ZEROOUT must be set, and nothing else */
	if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
			!S_ISREG(inode->i_mode))
		return -EINVAL;

	/*
	 * Discard needs hardware support; zeroing an encrypted file on a
	 * multi-device fs is unsupported (fscrypt_zeroout_range() cannot
	 * target per-device offsets).
	 */
	if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
			!f2fs_hw_support_discard(sbi)) ||
			((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
			 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
		return -EOPNOTSUPP;

	file_start_write(filp);
	inode_lock(inode);

	if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
			range.start >= inode->i_size) {
		ret = -EINVAL;
		goto err;
	}

	if (range.len == 0)
		goto err;

	if (inode->i_size - range.start > range.len) {
		end_addr = range.start + range.len;
	} else {
		/* len reaches (or passes) EOF; (u64)-1 means "to the end" */
		end_addr = range.len == (u64)-1 ?
			sbi->sb->s_maxbytes : inode->i_size;
		to_end = true;
	}

	/* only block-aligned ranges can be erased at block granularity */
	if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
			(!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
		ret = -EINVAL;
		goto err;
	}

	index = F2FS_BYTES_TO_BLK(range.start);
	pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto err;

	/* block GC relocation and mmap faults while blocks are erased */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	/* flush dirty data, then drop the cached pages for the range */
	ret = filemap_write_and_wait_range(mapping, range.start,
			to_end ? LLONG_MAX : end_addr - 1);
	if (ret)
		goto out;

	truncate_inode_pages_range(mapping, range.start,
			to_end ? -1 : end_addr - 1);

	while (index < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;
		int i;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
		if (ret) {
			if (ret == -ENOENT) {
				/* hole: skip the whole missing node page */
				index = f2fs_get_next_page_offset(&dn, index);
				continue;
			}
			goto out;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - index);
		for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
			struct block_device *cur_bdev;
			block_t blkaddr = f2fs_data_blkaddr(&dn);

			if (!__is_valid_data_blkaddr(blkaddr))
				continue;

			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
						DATA_GENERIC_ENHANCE)) {
				ret = -EFSCORRUPTED;
				f2fs_put_dnode(&dn);
				goto out;
			}

			cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
			if (f2fs_is_multi_device(sbi)) {
				int di = f2fs_target_device_index(sbi, blkaddr);

				/* rebase to a device-relative block address */
				blkaddr -= FDEV(di).start_blk;
			}

			if (len) {
				/*
				 * Extend the pending extent only when both
				 * the logical index and the physical block
				 * continue the run on the same device;
				 * otherwise flush it and start a new one.
				 */
				if (prev_bdev == cur_bdev &&
						index == prev_index + len &&
						blkaddr == prev_block + len) {
					len++;
				} else {
					ret = f2fs_secure_erase(prev_bdev,
						inode, prev_index, prev_block,
						len, range.flags);
					if (ret) {
						f2fs_put_dnode(&dn);
						goto out;
					}

					len = 0;
				}
			}

			if (!len) {
				/* begin a new extent at this block */
				prev_bdev = cur_bdev;
				prev_index = index;
				prev_block = blkaddr;
				len = 1;
			}
		}

		f2fs_put_dnode(&dn);

		/* erase can be long-running: stay killable and preemptible */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();
	}

	/* flush the final pending extent, if any */
	if (len)
		ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
				prev_block, len, range.flags);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
err:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
3846
Daeho Jeong9e2a5f82020-10-30 13:10:34 +09003847static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
Jaegeuk Kim52656e62014-09-24 15:37:02 -07003848{
Daeho Jeong9e2a5f82020-10-30 13:10:34 +09003849 struct inode *inode = file_inode(filp);
3850 struct f2fs_comp_option option;
Jaegeuk Kim1f227a32017-10-23 23:48:49 +02003851
Daeho Jeong9e2a5f82020-10-30 13:10:34 +09003852 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3853 return -EOPNOTSUPP;
3854
3855 inode_lock_shared(inode);
3856
3857 if (!f2fs_compressed_file(inode)) {
3858 inode_unlock_shared(inode);
3859 return -ENODATA;
3860 }
3861
3862 option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3863 option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3864
3865 inode_unlock_shared(inode);
3866
3867 if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
3868 sizeof(option)))
3869 return -EFAULT;
3870
3871 return 0;
3872}
3873
Daeho Jeonge1e8deb2020-10-30 13:10:35 +09003874static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
3875{
3876 struct inode *inode = file_inode(filp);
3877 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3878 struct f2fs_comp_option option;
3879 int ret = 0;
3880
3881 if (!f2fs_sb_has_compression(sbi))
3882 return -EOPNOTSUPP;
3883
3884 if (!(filp->f_mode & FMODE_WRITE))
3885 return -EBADF;
3886
3887 if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
3888 sizeof(option)))
3889 return -EFAULT;
3890
3891 if (!f2fs_compressed_file(inode) ||
3892 option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
3893 option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
3894 option.algorithm >= COMPRESS_MAX)
3895 return -EINVAL;
3896
3897 file_start_write(filp);
3898 inode_lock(inode);
3899
3900 if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
3901 ret = -EBUSY;
3902 goto out;
3903 }
3904
3905 if (inode->i_size != 0) {
3906 ret = -EFBIG;
3907 goto out;
3908 }
3909
3910 F2FS_I(inode)->i_compress_algorithm = option.algorithm;
3911 F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
3912 F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
3913 f2fs_mark_inode_dirty_sync(inode, true);
3914
3915 if (!f2fs_is_compress_backend_ready(inode))
3916 f2fs_warn(sbi, "compression algorithm is successfully set, "
3917 "but current kernel doesn't support this algorithm.");
3918out:
3919 inode_unlock(inode);
3920 file_end_write(filp);
3921
3922 return ret;
3923}
3924
/*
 * Pull @len consecutive pages of @inode starting at @page_idx into the
 * page cache and mark each of them dirty, so a later writeback rewrites
 * the covered clusters (used by the compress/decompress ioctls).
 *
 * Returns 0 on success, or the first negative errno hit while reading or
 * re-locking a page; pages processed before the failure stay dirtied.
 */
static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
{
	DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	pgoff_t redirty_idx = page_idx;
	int i, page_len = 0, ret = 0;

	/* prime the page cache in one batch before reading page by page */
	page_cache_ra_unbounded(&ractl, len, 0);

	/* pass 1: read pages up to date; each success holds one page ref */
	for (i = 0; i < len; i++, page_idx++) {
		page = read_cache_page(mapping, page_idx, NULL, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			break;
		}
		page_len++;
	}

	/* pass 2: lock and dirty only the page_len pages read above */
	for (i = 0; i < page_len; i++, redirty_idx++) {
		page = find_lock_page(mapping, redirty_idx);
		if (!page) {
			/*
			 * NOTE(review): breaking here leaks the references
			 * read_cache_page() took on the remaining
			 * (page_len - i) pages — presumably rare since we
			 * still pin them, but a concurrent truncate could
			 * drop them from the mapping; confirm.
			 */
			ret = -ENOMEM;
			break;
		}
		set_page_dirty(page);
		/* drop the lock + ref from find_lock_page()... */
		f2fs_put_page(page, 1);
		/* ...and the ref taken by read_cache_page() in pass 1 */
		f2fs_put_page(page, 0);
	}

	return ret;
}
3957
/*
 * F2FS_IOC_DECOMPRESS_FILE: rewrite every cluster of a compressed file in
 * its uncompressed form.
 *
 * Only valid when the fs is mounted with compress_mode=user, where
 * writeback presumably leaves clusters uncompressed unless
 * FI_ENABLE_COMPRESS is set (this ioctl never sets it) — mirrors
 * f2fs_ioc_compress_file() below; confirm against the writeback path.
 * Works by dirtying the file cluster by cluster via redirty_blocks() and
 * letting writeback rewrite the data, flushing every blk_per_seg dirty
 * pages to bound memory use.
 */
static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	int cluster_size = F2FS_I(inode)->i_cluster_size;
	int count, ret;

	if (!f2fs_sb_has_compression(sbi) ||
			F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
		return -EOPNOTSUPP;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	file_start_write(filp);
	inode_lock(inode);

	/* the kernel must be able to decompress the current algorithm */
	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (f2fs_is_mmap_file(inode)) {
		ret = -EBUSY;
		goto out;
	}

	/* start from a clean slate: flush all outstanding dirty data */
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;

	/* nothing is compressed; ret is 0 here from the flush above */
	if (!atomic_read(&fi->i_compr_blocks))
		goto out;

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* redirty one cluster at a time; writeback rewrites it raw */
	count = last_idx - page_idx;
	while (count) {
		int len = min(cluster_size, count);

		ret = redirty_blocks(inode, page_idx, len);
		if (ret < 0)
			break;

		/* cap dirty-page buildup at one segment's worth */
		if (get_dirty_pages(inode) >= blk_per_seg)
			filemap_fdatawrite(inode->i_mapping);

		count -= len;
		page_idx += len;
	}

	if (!ret)
		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
							LLONG_MAX);

	if (ret)
		f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
			__func__, ret);
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
4030
/*
 * F2FS_IOC_COMPRESS_FILE: rewrite every cluster of a compress-enabled
 * file in compressed form.
 *
 * Counterpart of f2fs_ioc_decompress_file(): in compress_mode=user,
 * FI_ENABLE_COMPRESS is set for the duration so writeback compresses the
 * clusters this ioctl redirties, then cleared again.  Dirty pages are
 * flushed every blk_per_seg pages to bound memory use.
 */
static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	int cluster_size = F2FS_I(inode)->i_cluster_size;
	int count, ret;

	if (!f2fs_sb_has_compression(sbi) ||
			F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
		return -EOPNOTSUPP;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	file_start_write(filp);
	inode_lock(inode);

	/* the kernel must support the inode's compression algorithm */
	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (f2fs_is_mmap_file(inode)) {
		ret = -EBUSY;
		goto out;
	}

	/* start from a clean slate: flush all outstanding dirty data */
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;

	/* make writeback compress the clusters redirtied below */
	set_inode_flag(inode, FI_ENABLE_COMPRESS);

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* redirty one cluster at a time; writeback compresses it */
	count = last_idx - page_idx;
	while (count) {
		int len = min(cluster_size, count);

		ret = redirty_blocks(inode, page_idx, len);
		if (ret < 0)
			break;

		/* cap dirty-page buildup at one segment's worth */
		if (get_dirty_pages(inode) >= blk_per_seg)
			filemap_fdatawrite(inode->i_mapping);

		count -= len;
		page_idx += len;
	}

	if (!ret)
		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
							LLONG_MAX);

	clear_inode_flag(inode, FI_ENABLE_COMPRESS);

	if (ret)
		f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
			__func__, ret);
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
4103
/*
 * Dispatch table for all f2fs ioctls.  Each case delegates to a dedicated
 * handler; the fs-state guards (checkpoint error / readiness) live in the
 * f2fs_ioctl() wrapper below so this function can also be reached from
 * the compat path.  Unknown commands return -ENOTTY per ioctl convention.
 */
static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	/* atomic/volatile write protocol */
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	/* fscrypt (generic FS_IOC_* encryption commands) */
	case FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
	case FS_IOC_ADD_ENCRYPTION_KEY:
		return f2fs_ioc_add_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
		return f2fs_ioc_remove_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
		return f2fs_ioc_get_encryption_key_status(filp, arg);
	case FS_IOC_GET_ENCRYPTION_NONCE:
		return f2fs_ioc_get_encryption_nonce(filp, arg);
	/* garbage collection and fs maintenance */
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	case F2FS_IOC_RESIZE_FS:
		return f2fs_ioc_resize_fs(filp, arg);
	/* fs-verity */
	case FS_IOC_ENABLE_VERITY:
		return f2fs_ioc_enable_verity(filp, arg);
	case FS_IOC_MEASURE_VERITY:
		return f2fs_ioc_measure_verity(filp, arg);
	case FS_IOC_READ_VERITY_METADATA:
		return f2fs_ioc_read_verity_metadata(filp, arg);
	case FS_IOC_GETFSLABEL:
		return f2fs_ioc_getfslabel(filp, arg);
	case FS_IOC_SETFSLABEL:
		return f2fs_ioc_setfslabel(filp, arg);
	/* transparent compression */
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
		return f2fs_get_compress_blocks(filp, arg);
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
		return f2fs_release_compress_blocks(filp, arg);
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
		return f2fs_reserve_compress_blocks(filp, arg);
	case F2FS_IOC_SEC_TRIM_FILE:
		return f2fs_sec_trim_file(filp, arg);
	case F2FS_IOC_GET_COMPRESS_OPTION:
		return f2fs_ioc_get_compress_option(filp, arg);
	case F2FS_IOC_SET_COMPRESS_OPTION:
		return f2fs_ioc_set_compress_option(filp, arg);
	case F2FS_IOC_DECOMPRESS_FILE:
		return f2fs_ioc_decompress_file(filp, arg);
	case F2FS_IOC_COMPRESS_FILE:
		return f2fs_ioc_compress_file(filp, arg);
	default:
		return -ENOTTY;
	}
}
4193
Chao Yu34178b1b2020-11-10 09:24:37 +08004194long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4195{
4196 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4197 return -EIO;
4198 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4199 return -ENOSPC;
4200
4201 return __f2fs_ioctl(filp, cmd, arg);
4202}
4203
Chao Yu4c8ff702019-11-01 18:07:14 +08004204static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
4205{
4206 struct file *file = iocb->ki_filp;
4207 struct inode *inode = file_inode(file);
Chao Yu8b83ac82020-04-16 18:16:56 +08004208 int ret;
Chao Yu4c8ff702019-11-01 18:07:14 +08004209
4210 if (!f2fs_is_compress_backend_ready(inode))
4211 return -EOPNOTSUPP;
4212
Chao Yu8b83ac82020-04-16 18:16:56 +08004213 ret = generic_file_read_iter(iocb, iter);
4214
4215 if (ret > 0)
4216 f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
4217
4218 return ret;
Chao Yu4c8ff702019-11-01 18:07:14 +08004219}
4220
/*
 * write_iter for f2fs files.
 *
 * Takes the inode lock (trylock for IOCB_NOWAIT), rejects writes to
 * immutable or released-compressed inodes, then decides between three
 * paths: NOWAIT overwrite (must not block on allocation), direct I/O
 * (possibly skipping preallocation for out-of-place DIO), and the default
 * path that preallocates blocks before __generic_file_write_iter().
 * On a short/failed write after preallocation, the over-allocated blocks
 * past i_size are truncated back.
 */
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		ret = -EIO;
		goto out;
	}

	/* cannot write a compressed file without the matching backend */
	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode)) {
			ret = -EAGAIN;
			goto out;
		}
	} else {
		inode_lock(inode);
	}

	if (unlikely(IS_IMMUTABLE(inode))) {
		ret = -EPERM;
		goto unlock;
	}

	/* blocks were released by F2FS_IOC_RELEASE_COMPRESS_BLOCKS */
	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
		ret = -EPERM;
		goto unlock;
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		/*
		 * If the user buffer may fault, skip preallocation so a
		 * partial fault during the copy cannot leave preallocated
		 * blocks behind.
		 */
		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT)) {
			/*
			 * NOWAIT is only honored for pure overwrites of
			 * already-allocated, non-inline blocks via DIO;
			 * anything else would need to block.
			 */
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
				f2fs_has_inline_data(inode) ||
				f2fs_force_buffered_io(inode, iocb, from)) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				ret = -EAGAIN;
				goto out;
			}
			goto write;
		}

		if (is_inode_flag_set(inode, FI_NO_PREALLOC))
			goto write;

		if (iocb->ki_flags & IOCB_DIRECT) {
			/*
			 * Convert inline data for Direct I/O before entering
			 * f2fs_direct_IO().
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				goto out_err;
			/*
			 * If force_buffered_io() is true, we have to allocate
			 * blocks all the time, since f2fs_direct_IO will fall
			 * back to buffered IO.
			 */
			if (!f2fs_force_buffered_io(inode, iocb, from) &&
					allow_outplace_dio(inode, iocb, from))
				goto write;
		}
		preallocated = true;
		target_size = iocb->ki_pos + iov_iter_count(from);

		err = f2fs_preallocate_blocks(iocb, from);
		if (err) {
out_err:
			clear_inode_flag(inode, FI_NO_PREALLOC);
			inode_unlock(inode);
			ret = err;
			goto out;
		}
write:
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size) {
			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);
			f2fs_truncate(inode);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
unlock:
	inode_unlock(inode);
out:
	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
					iov_iter_count(from), ret);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
4334
Namjae Jeone9750822013-02-04 23:41:41 +09004335#ifdef CONFIG_COMPAT
/*
 * 32-bit userspace layout of struct f2fs_gc_range: compat_u64 gives the
 * 4-byte u64 alignment used by 32-bit ABIs, so the struct size (and thus
 * the ioctl number) differs from the native one.
 */
struct compat_f2fs_gc_range {
	u32 sync;
	compat_u64 start;
	compat_u64 len;
};
#define F2FS_IOC32_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,\
						struct compat_f2fs_gc_range)
4343
4344static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4345{
4346 struct compat_f2fs_gc_range __user *urange;
4347 struct f2fs_gc_range range;
4348 int err;
4349
4350 urange = compat_ptr(arg);
4351 err = get_user(range.sync, &urange->sync);
4352 err |= get_user(range.start, &urange->start);
4353 err |= get_user(range.len, &urange->len);
4354 if (err)
4355 return -EFAULT;
4356
4357 return __f2fs_ioc_gc_range(file, &range);
4358}
4359
/*
 * 32-bit userspace layout of struct f2fs_move_range: compat_u64 gives the
 * 4-byte u64 alignment used by 32-bit ABIs, so the struct size (and thus
 * the ioctl number) differs from the native one.
 */
struct compat_f2fs_move_range {
	u32 dst_fd;
	compat_u64 pos_in;
	compat_u64 pos_out;
	compat_u64 len;
};
#define F2FS_IOC32_MOVE_RANGE \
	_IOWR(F2FS_IOCTL_MAGIC, 9, struct compat_f2fs_move_range)
4368
4369static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4370{
4371 struct compat_f2fs_move_range __user *urange;
4372 struct f2fs_move_range range;
4373 int err;
4374
4375 urange = compat_ptr(arg);
4376 err = get_user(range.dst_fd, &urange->dst_fd);
4377 err |= get_user(range.pos_in, &urange->pos_in);
4378 err |= get_user(range.pos_out, &urange->pos_out);
4379 err |= get_user(range.len, &urange->len);
4380 if (err)
4381 return -EFAULT;
4382
4383 return __f2fs_ioc_move_range(file, &range);
4384}
4385
/*
 * 32-bit compat ioctl entry point.  Applies the same fs-state guards as
 * f2fs_ioctl(), translates the commands whose structures have a different
 * 32-bit layout (gc_range, move_range) or number (FS_IOC32_GETVERSION),
 * passes through the commands whose ABI is identical on both word sizes,
 * and rejects everything else with -ENOIOCTLCMD.
 */
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
		return -ENOSPC;

	switch (cmd) {
	case FS_IOC32_GETVERSION:
		/* same semantics, different number: remap and fall through
		 * to the native handler at the bottom */
		cmd = FS_IOC_GETVERSION;
		break;
	/* layout differs on 32-bit: needs a translating thunk */
	case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
		return f2fs_compat_ioc_gc_range(file, arg);
	case F2FS_IOC32_MOVE_RANGE:
		return f2fs_compat_ioc_move_range(file, arg);
	/* identical ABI on 32-bit and 64-bit: pass straight through */
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case FITRIM:
	case FS_IOC_SET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_PWSALT:
	case FS_IOC_GET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
	case FS_IOC_ADD_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
	case FS_IOC_GET_ENCRYPTION_NONCE:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
	case F2FS_IOC_RESIZE_FS:
	case FS_IOC_ENABLE_VERITY:
	case FS_IOC_MEASURE_VERITY:
	case FS_IOC_READ_VERITY_METADATA:
	case FS_IOC_GETFSLABEL:
	case FS_IOC_SETFSLABEL:
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
	case F2FS_IOC_SEC_TRIM_FILE:
	case F2FS_IOC_GET_COMPRESS_OPTION:
	case F2FS_IOC_SET_COMPRESS_OPTION:
	case F2FS_IOC_DECOMPRESS_FILE:
	case F2FS_IOC_COMPRESS_FILE:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
4445#endif
4446
/* file_operations for f2fs regular files (installed in f2fs inodes). */
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= f2fs_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	/* splice goes through the generic pagecache helpers */
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};