// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/delay.h>
#include <linux/backing-dev.h>
#include <linux/fileattr.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "aops.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (whence) {
	case SEEK_END:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek(file, offset, whence);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;

	case SEEK_DATA:
		error = gfs2_seek_data(file, offset);
		break;

	case SEEK_HOLE:
		error = gfs2_seek_hole(file, offset);
		break;

	case SEEK_CUR:
	case SEEK_SET:
		/*
		 * These don't reference inode->i_size and don't depend on the
		 * block mapping, so we don't need the glock.
		 */
		error = generic_file_llseek(file, offset, whence);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}

/**
 * gfs2_readdir - Iterator for a directory
 * @file: The directory to read from
 * @ctx: What to feed directory entries to
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return error;

	error = gfs2_dir_read(dir, ctx, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	return error;
}

/**
 * fsflag_gfs2flag
 *
 * The FS_JOURNAL_DATA_FL flag maps to GFS2_DIF_INHERIT_JDATA for directories,
 * and to GFS2_DIF_JDATA for non-directories.
 */
static struct {
	u32 fsflag;
	u32 gfsflag;
} fsflag_gfs2flag[] = {
	{FS_SYNC_FL, GFS2_DIF_SYNC},
	{FS_IMMUTABLE_FL, GFS2_DIF_IMMUTABLE},
	{FS_APPEND_FL, GFS2_DIF_APPENDONLY},
	{FS_NOATIME_FL, GFS2_DIF_NOATIME},
	{FS_INDEX_FL, GFS2_DIF_EXHASH},
	{FS_TOPDIR_FL, GFS2_DIF_TOPDIR},
	{FS_JOURNAL_DATA_FL, GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA},
};

static inline u32 gfs2_gfsflags_to_fsflags(struct inode *inode, u32 gfsflags)
{
	int i;
	u32 fsflags = 0;

	if (S_ISDIR(inode->i_mode))
		gfsflags &= ~GFS2_DIF_JDATA;
	else
		gfsflags &= ~GFS2_DIF_INHERIT_JDATA;

	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++)
		if (gfsflags & fsflag_gfs2flag[i].gfsflag)
			fsflags |= fsflag_gfs2flag[i].fsflag;
	return fsflags;
}

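/**
 * gfs2_fileattr_get - Report the on-disk inode flags as FS_* file attributes
 * @dentry: The dentry of the inode being queried
 * @fa: The fileattr structure to fill in
 *
 * Takes a shared glock so that the i_diskflags read is consistent across
 * the cluster, then translates the flags with gfs2_gfsflags_to_fsflags().
 *
 * Returns: errno
 */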
int gfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	if (d_is_special(dentry))
		return -ENOTTY;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		goto out_uninit;

	fsflags = gfs2_gfsflags_to_fsflags(inode, ip->i_diskflags);

	fileattr_fill_flags(fa, fsflags);

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return error;
}

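/**
 * gfs2_set_inode_flags - Mirror GFS2 on-disk flags into the VFS inode flags
 * @inode: The inode whose i_flags should be updated
 *
 * Derives S_IMMUTABLE, S_APPEND, S_NOATIME, S_SYNC and S_NOSEC from the
 * current ip->i_diskflags and extended attribute state.
 */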
void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_TOPDIR|			\
			     GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @inode: The inode being changed
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 * @fsflags: The FS_* inode flags passed in
 *
 */
static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask,
			     const u32 fsflags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(&init_user_ns, inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl,
				       GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_SET_FLAGS);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_ordered_del_inode(ip);
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	inode->i_ctime = current_time(inode);
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

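/**
 * gfs2_fileattr_set - Change GFS2 on-disk inode flags via the fileattr API
 * @mnt_userns: The user namespace of the mount
 * @dentry: The dentry of the inode being changed
 * @fa: The requested FS_* file attributes
 *
 * Translates the FS_* flags back into GFS2_DIF_* flags, rejects flags that
 * user space may not set, and applies the change via do_gfs2_set_flags().
 *
 * Returns: errno
 */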
int gfs2_fileattr_set(struct user_namespace *mnt_userns,
		      struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	u32 fsflags = fa->flags, gfsflags = 0;
	u32 mask;
	int i;

	if (d_is_special(dentry))
		return -ENOTTY;

	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;

	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++) {
		if (fsflags & fsflag_gfs2flag[i].fsflag) {
			fsflags &= ~fsflag_gfs2flag[i].fsflag;
			gfsflags |= fsflag_gfs2flag[i].gfsflag;
		}
	}
	if (fsflags || gfsflags & ~GFS2_FLAGS_USER_SET)
		return -EINVAL;

	mask = GFS2_FLAGS_USER_SET;
	if (S_ISDIR(inode->i_mode)) {
		mask &= ~GFS2_DIF_JDATA;
	} else {
		/* The GFS2_DIF_TOPDIR flag is only valid for directories. */
		if (gfsflags & GFS2_DIF_TOPDIR)
			return -EINVAL;
		mask &= ~(GFS2_DIF_TOPDIR | GFS2_DIF_INHERIT_JDATA);
	}

	return do_gfs2_set_flags(inode, gfsflags, mask, fsflags);
}

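/**
 * gfs2_getlabel - Handle FS_IOC_GETFSLABEL; copy the lock table name out
 * @filp: The file on which the ioctl was issued
 * @label: User buffer to receive the label (GFS2_LOCKNAME_LEN bytes)
 *
 * GFS2 uses the lock table name from the superblock as the filesystem label.
 *
 * Returns: errno
 */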
static int gfs2_getlabel(struct file *filp, char __user *label)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (copy_to_user(label, sdp->sd_sb.sb_locktable, GFS2_LOCKNAME_LEN))
		return -EFAULT;

	return 0;
}

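/**
 * gfs2_ioctl - Dispatch the ioctls that GFS2 implements itself
 * @filp: The file on which the ioctl was issued
 * @cmd: The ioctl command (FITRIM or FS_IOC_GETFSLABEL)
 * @arg: The user space argument
 *
 * Returns: errno, or -ENOTTY for unknown commands
 */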
static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
	case FS_IOC_GETFSLABEL:
		return gfs2_getlabel(filp, (char __user *)arg);
	}

	return -ENOTTY;
}

#ifdef CONFIG_COMPAT
static long gfs2_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	/* Keep this list in sync with gfs2_ioctl */
	case FITRIM:
	case FS_IOC_GETFSLABEL:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return gfs2_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define gfs2_compat_ioctl NULL
#endif

/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */

static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
	struct inode *inode = file_inode(filep);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

	if (hint > atomic_read(&ip->i_sizehint))
		atomic_set(&ip->i_sizehint, hint);
}

/**
 * gfs2_allocate_page_backing - Allocate blocks for a write fault
 * @page: The (locked) page to allocate backing for
 * @length: Size of the allocation
 *
 * We try to allocate all the blocks required for the page in one go. This
 * might fail for various reasons, so we keep trying until all the blocks to
 * back this page are allocated. If some of the blocks are already allocated,
 * that is ok too.
 */
static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
{
	u64 pos = page_offset(page);

	do {
		struct iomap iomap = { };

		if (gfs2_iomap_get_alloc(page->mapping->host, pos, length, &iomap))
			return -EIO;

		if (length < iomap.length)
			iomap.length = length;
		length -= iomap.length;
		pos += iomap.length;
	} while (length > 0);

	return 0;
}

/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	u64 offset = page_offset(page);
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	unsigned int length;
	loff_t size;
	int ret;

	sb_start_pagefault(inode->i_sb);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	/* Check page index against inode size */
	size = i_size_read(inode);
	if (offset >= size) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Update file times before taking page lock */
	file_update_time(vmf->vma->vm_file);

	/* page is wholly or partially inside EOF */
	if (offset > size - PAGE_SIZE)
		length = offset_in_page(size);
	else
		length = PAGE_SIZE;

	gfs2_size_hint(vmf->vma->vm_file, offset, length);

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	/*
	 * iomap_writepage / iomap_writepages currently don't support inline
	 * files, so always unstuff here.
	 */

	if (!gfs2_is_stuffed(ip) &&
	    !gfs2_write_alloc_required(ip, offset, length)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
		goto out_unlock;
	}

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_unlock;

	gfs2_write_calc_reserv(ip, length, &data_blocks, &ind_blocks);
	ap.target = data_blocks + ind_blocks;
	ret = gfs2_quota_lock_check(ip, &ap);
	if (ret)
		goto out_unlock;
	ret = gfs2_inplace_reserve(ip, &ap);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EAGAIN;
	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
	ret = 0;
	if (gfs2_is_stuffed(ip))
		ret = gfs2_unstuff_dinode(ip, page);
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page, length);

out_trans_end:
	if (ret)
		unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (ret == 0) {
		set_page_dirty(page);
		wait_for_stable_page(page);
	}
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}

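/**
 * gfs2_fault - Handle a read fault under the inode glock
 * @vmf: The virtual memory fault
 *
 * Takes a shared glock around filemap_fault() so that the faulted-in page
 * contents stay consistent with the other cluster nodes.
 */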
static vm_fault_t gfs2_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	vm_fault_t ret;
	int err;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	err = gfs2_glock_nq(&gh);
	if (err) {
		ret = block_page_mkwrite_return(err);
		goto out_uninit;
	}
	ret = filemap_fault(vmf);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return ret;
}

static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = gfs2_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = gfs2_page_mkwrite,
};

/**
 * gfs2_mmap - set up a memory mapping of a file
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}

/**
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
 *
 * This may be called under a glock or not, depending upon how it has
 * been called. We must always be called under a glock for regular
 * files, however. For other file types, it does not matter whether
 * we hold the glock or not.
 *
 * Returns: Error code or 0 for success
 */

int gfs2_open_common(struct inode *inode, struct file *file)
{
	struct gfs2_file *fp;
	int ret;

	if (S_ISREG(inode->i_mode)) {
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;
	}

	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;
	if (file->f_mode & FMODE_WRITE) {
		ret = gfs2_qa_get(GFS2_I(inode));
		if (ret)
			goto fail;
	}
	return 0;

fail:
	kfree(file->private_data);
	file->private_data = NULL;
	return ret;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that we have the file size uptodate for the large
 * file check which is in the common code. That is only an issue for
 * regular files though.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	int error;
	bool need_unlock = false;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		need_unlock = true;
	}

	error = gfs2_open_common(inode, file);

	if (need_unlock)
		gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if (gfs2_rs_active(&ip->i_res))
		gfs2_rs_delete(ip, &inode->i_writecount);
	if (file->f_mode & FMODE_WRITE)
		gfs2_qa_put(ip);
	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & I_DIRTY;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (!gfs2_is_jdata(ip))
		sync_state &= ~I_DIRTY_PAGES;
	if (datasync)
		sync_state &= ~I_DIRTY_SYNC;

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			ret = file_write_and_wait(file);
		if (ret)
			return ret;
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = file_fdatawait_range(file, start, end);

	return ret ? ret : ret1;
}

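/**
 * gfs2_file_direct_read - Perform an O_DIRECT read
 * @iocb: The io context
 * @to: The destination of the read
 * @gh: The glock holder to use for the deferred lock
 *
 * The glock is taken in the deferred state so that other nodes flush their
 * page cache for this inode before the read is handed to iomap_dio_rw().
 *
 * Returns: The number of bytes read, or errno
 */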
static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
				     struct gfs2_holder *gh)
{
	struct file *file = iocb->ki_filp;
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	size_t count = iov_iter_count(to);
	ssize_t ret;

	if (!count)
		return 0; /* skip atime */

	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
	ret = gfs2_glock_nq(gh);
	if (ret)
		goto out_uninit;

	ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL, 0);
	gfs2_glock_dq(gh);
out_uninit:
	gfs2_holder_uninit(gh);
	return ret;
}

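/**
 * gfs2_file_direct_write - Perform an O_DIRECT write
 * @iocb: The io context
 * @from: The data to write
 * @gh: The glock holder to use for the deferred lock
 *
 * Returns: The number of bytes written; a zero or short count makes the
 *          caller fall back to buffered I/O for the remainder. On failure,
 *          errno.
 */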
static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
				      struct gfs2_holder *gh)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t len = iov_iter_count(from);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation on
	 * this path. All we need to change is the atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like the
	 * VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
	ret = gfs2_glock_nq(gh);
	if (ret)
		goto out_uninit;

	/* Silently fall back to buffered I/O when writing beyond EOF */
	if (offset + len > i_size_read(&ip->i_inode))
		goto out;

	ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL, 0);
	if (ret == -ENOTBLK)
		ret = 0;
out:
	gfs2_glock_dq(gh);
out_uninit:
	gfs2_holder_uninit(gh);
	return ret;
}

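/**
 * gfs2_file_read_iter - Perform a read from a file
 * @iocb: The io context
 * @to: The destination of the read
 *
 * Tries an O_DIRECT read first when requested, then a lockless buffered
 * read (IOCB_NOIO); if pages are missing from the page cache, the read is
 * retried under a shared glock so the missing pages can be brought in.
 *
 * Returns: The number of bytes read, or errno
 */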
static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct gfs2_inode *ip;
	struct gfs2_holder gh;
	size_t written = 0;
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = gfs2_file_direct_read(iocb, to, &gh);
		if (likely(ret != -ENOTBLK))
			return ret;
		iocb->ki_flags &= ~IOCB_DIRECT;
	}
	iocb->ki_flags |= IOCB_NOIO;
	ret = generic_file_read_iter(iocb, to);
	iocb->ki_flags &= ~IOCB_NOIO;
	if (ret >= 0) {
		if (!iov_iter_count(to))
			return ret;
		written = ret;
	} else {
		if (ret != -EAGAIN)
			return ret;
		if (iocb->ki_flags & IOCB_NOWAIT)
			return ret;
	}
	ip = GFS2_I(iocb->ki_filp->f_mapping->host);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;
	ret = generic_file_read_iter(iocb, to);
	if (ret > 0)
		written += ret;
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return written ? written : ret;
}

/**
 * gfs2_file_write_iter - Perform a write to a file
 * @iocb: The io context
 * @from: The data to write
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	ssize_t ret;

	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));

	if (iocb->ki_flags & IOCB_APPEND) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out_unlock;

	ret = file_remove_privs(file);
	if (ret)
		goto out_unlock;

	ret = file_update_time(file);
	if (ret)
		goto out_unlock;

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = file->f_mapping;
		ssize_t buffered, ret2;

		ret = gfs2_file_direct_write(iocb, from, &gh);
		if (ret < 0 || !iov_iter_count(from))
			goto out_unlock;

		iocb->ki_flags |= IOCB_DSYNC;
		current->backing_dev_info = inode_to_bdi(inode);
		buffered = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
		current->backing_dev_info = NULL;
		if (unlikely(buffered <= 0))
			goto out_unlock;

		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics. If the writeback or invalidate fails, only report
		 * the direct I/O range as we don't know if the buffered pages
		 * made it to disk.
		 */
		iocb->ki_pos += buffered;
		ret2 = generic_write_sync(iocb, buffered);
		invalidate_mapping_pages(mapping,
				(iocb->ki_pos - buffered) >> PAGE_SHIFT,
				(iocb->ki_pos - 1) >> PAGE_SHIFT);
		if (!ret || ret2 > 0)
			ret += ret2;
	} else {
		current->backing_dev_info = inode_to_bdi(inode);
		ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
		current->backing_dev_info = NULL;
		if (likely(ret > 0)) {
			iocb->ki_pos += ret;
			ret = generic_write_sync(iocb, ret);
		}
	}

out_unlock:
	inode_unlock(inode);
	return ret;
}

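/**
 * fallocate_chunk - Allocate and zero one chunk of a fallocate request
 * @inode: The inode being preallocated
 * @offset: The start offset of the chunk
 * @len: The length of the chunk
 * @mode: The fallocate mode
 *
 * Unstuffs the inode if necessary, then walks the range with
 * gfs2_iomap_get_alloc(), zeroing any newly allocated extents on disk.
 *
 * Returns: errno
 */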
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t end = offset + len;
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	while (offset < end) {
		struct iomap iomap = { };

		error = gfs2_iomap_get_alloc(inode, offset, end - offset,
					     &iomap);
		if (error)
			goto out;
		offset = iomap.offset + iomap.length;
		if (!(iomap.flags & IOMAP_F_NEW))
			continue;
		error = sb_issue_zeroout(sb, iomap.addr >> inode->i_blkbits,
					 iomap.length >> inode->i_blkbits,
					 GFP_NOFS);
		if (error) {
			fs_err(GFS2_SB(inode), "Failed to zero data buffers\n");
			goto out;
		}
	}
out:
	brelse(dibh);
	return error;
}

/**
 * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
 * blocks, determine how many bytes can be written.
 * @ip: The inode in question.
 * @len: Max cap of bytes. What we return in *len must be <= this.
 * @data_blocks: Compute and return the number of data blocks needed
 * @ind_blocks: Compute and return the number of indirect blocks needed
 * @max_blocks: The total blocks available to work with.
 *
 * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
 */
static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks,
			    unsigned int max_blocks)
{
	loff_t max = *len;
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}

	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}

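/**
 * __gfs2_fallocate - Preallocate blocks for a range of a file
 * @file: The file being preallocated
 * @mode: The fallocate mode
 * @offset: The start of the range
 * @len: The length of the range
 *
 * Works through the range in chunks, capping each chunk by what the quota,
 * the selected resource group and the transaction size will allow, and
 * calls fallocate_chunk() on each piece.
 *
 * Returns: 0 on success, or errno
 */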
static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes, max_blks;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;

	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_size_hint(file, offset, len);

	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.min_target = data_blocks + ind_blocks;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}

		/* We need to determine how many bytes we can actually
		 * fallocate without exceeding quota or going over the
		 * end of the fs. We start off optimistically by assuming
		 * we can write max_bytes */
		max_bytes = (len > max_chunk_size) ? max_chunk_size : len;

		/* Since max_bytes is most likely a theoretical max, we
		 * calculate a more realistic 'bytes' to serve as a good
		 * starting point for the number of bytes we may be able
		 * to write */
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
		ap.target = data_blocks + ind_blocks;

		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			return error;
		/* ap.allowed tells us how many blocks quota will allow
		 * us to write. Check if this reduces max_blks */
		max_blks = UINT_MAX;
		if (ap.allowed)
			max_blks = ap.allowed;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;

		/* check if the selected rgrp limits our max_blks further */
		if (ip->i_res.rs_reserved < max_blks)
			max_blks = ip->i_res.rs_reserved;

		/* Almost done. Calculate bytes that can be written using
		 * max_blks. We also recompute max_bytes, data_blocks and
		 * ind_blocks */
		calc_max_reserv(ip, &max_bytes, &data_blocks,
				&ind_blocks, max_blks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_SIZE >> inode->i_blkbits);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size)
		i_size_write(inode, pos + count);
	file_update_time(file);
	mark_inode_dirty(inode);

	if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
		return vfs_fsync_range(file, pos, pos + count - 1,
				       (file->f_flags & __O_SYNC) ? 0 : 1);
	return 0;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
	return error;
}

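/**
 * gfs2_fallocate - The fallocate file operation for GFS2
 * @file: The file being operated on
 * @mode: The fallocate mode flags
 * @offset: The start of the range
 * @len: The length of the range
 *
 * Only preallocation (with or without FALLOC_FL_KEEP_SIZE) and
 * FALLOC_FL_PUNCH_HOLE are supported. The work is done under an exclusive
 * glock with the inode lock held.
 *
 * Returns: 0 on success, or errno
 */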
1140static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
1141{
1142 struct inode *inode = file_inode(file);
Andrew Priced4d7fc12017-04-05 11:45:26 -04001143 struct gfs2_sbd *sdp = GFS2_SB(inode);
Andrew Price9c9f1152014-11-12 17:24:03 +00001144 struct gfs2_inode *ip = GFS2_I(inode);
1145 struct gfs2_holder gh;
1146 int ret;
1147
Andreas Gruenbacher4e56a642017-12-14 17:11:03 +01001148 if (mode & ~(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE))
Andrew Priced4d7fc12017-04-05 11:45:26 -04001149 return -EOPNOTSUPP;
1150 /* fallocate is needed by gfs2_grow to reserve space in the rindex */
1151 if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
Andrew Price9c9f1152014-11-12 17:24:03 +00001152 return -EOPNOTSUPP;
1153
Al Viro59551022016-01-22 15:40:57 -05001154 inode_lock(inode);
Andrew Price9c9f1152014-11-12 17:24:03 +00001155
1156 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
1157 ret = gfs2_glock_nq(&gh);
1158 if (ret)
1159 goto out_uninit;
1160
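	/*
	 * For a size-extending request, make sure the new size is acceptable
	 * (RLIMIT_FSIZE, s_maxbytes) before reserving any blocks.
	 */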
1161 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
1162 (offset + len) > inode->i_size) {
1163 ret = inode_newsize_ok(inode, offset + len);
1164 if (ret)
1165 goto out_unlock;
1166 }
1167
1168 ret = get_write_access(inode);
1169 if (ret)
1170 goto out_unlock;
1171
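	/*
	 * Hole punching and preallocation take separate paths.  If the
	 * allocation fails part way through, discard any multi-block
	 * reservation still attached to the inode.
	 */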
Andreas Gruenbacher4e56a642017-12-14 17:11:03 +01001172 if (mode & FALLOC_FL_PUNCH_HOLE) {
1173 ret = __gfs2_punch_hole(file, offset, len);
1174 } else {
Andreas Gruenbacher4e56a642017-12-14 17:11:03 +01001175 ret = __gfs2_fallocate(file, mode, offset, len);
Andreas Gruenbacher4e56a642017-12-14 17:11:03 +01001176 if (ret)
1177 gfs2_rs_deltree(&ip->i_res);
1178 }
Bob Petersona097dc7e2015-07-16 08:28:04 -05001179
Andrew Price9c9f1152014-11-12 17:24:03 +00001180 put_write_access(inode);
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01001181out_unlock:
Bob Petersona0846a52014-02-06 10:43:50 -05001182 gfs2_glock_dq(&gh);
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01001183out_uninit:
Bob Petersona0846a52014-02-06 10:43:50 -05001184 gfs2_holder_uninit(&gh);
Al Viro59551022016-01-22 15:40:57 -05001185 inode_unlock(inode);
Andrew Price9c9f1152014-11-12 17:24:03 +00001186 return ret;
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01001187}
1188
Bob Petersonf1ea6f42015-02-24 07:22:28 -06001189static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
1190 struct file *out, loff_t *ppos,
1191 size_t len, unsigned int flags)
1192{
Bob Peterson2fba46a2020-02-27 12:47:53 -06001193 ssize_t ret;
Bob Petersonf1ea6f42015-02-24 07:22:28 -06001194
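	/*
	 * Let the allocator know roughly how much data is about to be
	 * written so block reservations can cover the whole splice.
	 */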
Bob Petersonf1ea6f42015-02-24 07:22:28 -06001195 gfs2_size_hint(out, *ppos, len);
1196
Bob Peterson2fba46a2020-02-27 12:47:53 -06001197 ret = iter_file_splice_write(pipe, out, ppos, len, flags);
Bob Peterson2fba46a2020-02-27 12:47:53 -06001198 return ret;
Bob Petersonf1ea6f42015-02-24 07:22:28 -06001199}
1200
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001201#ifdef CONFIG_GFS2_FS_LOCKING_DLM
1202
David Teiglandb3b94fa2006-01-16 16:50:04 +00001203/**
1204 * gfs2_lock - acquire/release a POSIX lock on a file
1205 * @file: the file pointer
1206 * @cmd: either modify or retrieve lock state, possibly wait
1207 * @fl: type and range of lock
1208 *
1209 * Returns: errno
1210 */
1211
1212static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
1213{
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -04001214 struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
1215 struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001216 struct lm_lockstruct *ls = &sdp->sd_lockstruct;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001217
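	/*
	 * Only advisory POSIX locks are supported; mandatory locking is
	 * refused, except that unlock requests are always let through.
	 */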
David Teiglandb3b94fa2006-01-16 16:50:04 +00001218 if (!(fl->fl_flags & FL_POSIX))
1219 return -ENOLCK;
Sachin Prabhu720e7742010-03-11 12:24:45 -05001220 if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001221 return -ENOLCK;
1222
Marc Eshel586759f2006-11-14 16:37:25 -05001223 if (cmd == F_CANCELLK) {
1224 /* Hack: */
1225 cmd = F_SETLK;
1226 fl->fl_type = F_UNLCK;
1227 }
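	/*
	 * After a withdrawal the DLM can no longer be used.  Unlocks are
	 * still applied to the local lock table so state gets cleaned up,
	 * but the operation is reported as failed.
	 */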
Bob Petersoneb43e662019-11-14 09:52:15 -05001228 if (unlikely(gfs2_withdrawn(sdp))) {
Steven Whitehousec2952d22013-03-14 15:49:59 +00001229 if (fl->fl_type == F_UNLCK)
Benjamin Coddington4f656362015-10-22 13:38:14 -04001230 locks_lock_file_wait(file, fl);
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001231 return -EIO;
Steven Whitehousec2952d22013-03-14 15:49:59 +00001232 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001233 if (IS_GETLK(cmd))
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001234 return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001235 else if (fl->fl_type == F_UNLCK)
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001236 return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001237 else
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001238 return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001239}
1240
David Teiglandb3b94fa2006-01-16 16:50:04 +00001241static int do_flock(struct file *file, int cmd, struct file_lock *fl)
1242{
Steven Whitehouse5c676f62006-02-27 17:23:27 -05001243 struct gfs2_file *fp = file->private_data;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001244 struct gfs2_holder *fl_gh = &fp->f_fl_gh;
Al Viro496ad9a2013-01-23 17:07:38 -05001245 struct gfs2_inode *ip = GFS2_I(file_inode(file));
David Teiglandb3b94fa2006-01-16 16:50:04 +00001246 struct gfs2_glock *gl;
1247 unsigned int state;
Bob Petersonb58bf402015-07-24 09:45:43 -05001248 u16 flags;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001249 int error = 0;
Bob Peterson2ddfbdd2014-08-20 12:44:45 -04001250 int sleeptime;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001251
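	/*
	 * An exclusive flock maps to an exclusive glock and a shared flock
	 * to a shared one.  GL_EXACT demands exactly that state, and
	 * non-blocking requests use a "try" lock with a single demote
	 * callback to the current holder.
	 */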
1252 state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
Bob Peterson2ddfbdd2014-08-20 12:44:45 -04001253 flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001254
Steven Whitehousef55ab262006-02-21 12:51:39 +00001255 mutex_lock(&fp->f_fl_mutex);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001256
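	/*
	 * If this open file already holds a flock glock, converting the lock
	 * means dropping the old local lock and glock before requesting the
	 * new state.  Otherwise, get (or create) the flock glock for this
	 * inode first.
	 */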
Andreas Gruenbacher283c9a92017-07-17 13:39:15 -05001257 if (gfs2_holder_initialized(fl_gh)) {
NeilBrown4d62d3f2018-11-30 10:04:08 +11001258 struct file_lock request;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001259 if (fl_gh->gh_state == state)
1260 goto out;
NeilBrown4d62d3f2018-11-30 10:04:08 +11001261 locks_init_lock(&request);
1262 request.fl_type = F_UNLCK;
1263 request.fl_flags = FL_FLOCK;
1264 locks_lock_file_wait(file, &request);
Bob Peterson5bef3e72014-06-26 10:46:25 -04001265 gfs2_glock_dq(fl_gh);
Abhijith Dasb4c20162007-09-13 23:35:27 -05001266 gfs2_holder_reinit(state, flags, fl_gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001267 } else {
Steven Whitehouse6802e342008-05-21 17:03:22 +01001268 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
1269 &gfs2_flock_glops, CREATE, &gl);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001270 if (error)
1271 goto out;
Abhijith Dasb4c20162007-09-13 23:35:27 -05001272 gfs2_holder_init(gl, state, flags, fl_gh);
1273 gfs2_glock_put(gl);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001274 }
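	/*
	 * A "try" request may fail transiently while another node lets go of
	 * the glock, so retry a few times with increasing delays before
	 * giving up and returning -EAGAIN.
	 */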
Bob Peterson2ddfbdd2014-08-20 12:44:45 -04001275 for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
1276 error = gfs2_glock_nq(fl_gh);
1277 if (error != GLR_TRYFAILED)
1278 break;
1279 fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
1280 fl_gh->gh_error = 0;
1281 msleep(sleeptime);
1282 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001283 if (error) {
1284 gfs2_holder_uninit(fl_gh);
1285 if (error == GLR_TRYFAILED)
1286 error = -EAGAIN;
1287 } else {
Benjamin Coddington4f656362015-10-22 13:38:14 -04001288 error = locks_lock_file_wait(file, fl);
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -04001289 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001290 }
1291
Steven Whitehouse420b9e52006-07-31 15:42:17 -04001292out:
Steven Whitehousef55ab262006-02-21 12:51:39 +00001293 mutex_unlock(&fp->f_fl_mutex);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001294 return error;
1295}
1296
1297static void do_unflock(struct file *file, struct file_lock *fl)
1298{
Steven Whitehouse5c676f62006-02-27 17:23:27 -05001299 struct gfs2_file *fp = file->private_data;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001300 struct gfs2_holder *fl_gh = &fp->f_fl_gh;
1301
Steven Whitehousef55ab262006-02-21 12:51:39 +00001302 mutex_lock(&fp->f_fl_mutex);
Benjamin Coddington4f656362015-10-22 13:38:14 -04001303 locks_lock_file_wait(file, fl);
Andreas Gruenbacher6df9f9a2016-06-17 07:31:27 -05001304 if (gfs2_holder_initialized(fl_gh)) {
Bob Peterson2ddfbdd2014-08-20 12:44:45 -04001305 gfs2_glock_dq(fl_gh);
Steven Whitehouse0a334432011-03-09 11:14:32 +00001306 gfs2_holder_uninit(fl_gh);
1307 }
Steven Whitehousef55ab262006-02-21 12:51:39 +00001308 mutex_unlock(&fp->f_fl_mutex);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001309}
1310
1311/**
1312 * gfs2_flock - acquire/release a flock lock on a file
1313 * @file: the file pointer
1314 * @cmd: either modify or retrieve lock state, possibly wait
1315 * @fl: type and range of lock
1316 *
1317 * Returns: errno
1318 */
1319
1320static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
1321{
David Teiglandb3b94fa2006-01-16 16:50:04 +00001322 if (!(fl->fl_flags & FL_FLOCK))
1323 return -ENOLCK;
Abhijith Dasa12af1e2009-06-01 12:30:03 -05001324 if (fl->fl_type & LOCK_MAND)
1325 return -EOPNOTSUPP;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001326
David Teiglandb3b94fa2006-01-16 16:50:04 +00001327 if (fl->fl_type == F_UNLCK) {
1328 do_unflock(file, fl);
1329 return 0;
Steven Whitehoused00223f2006-10-02 10:28:05 -04001330 } else {
David Teiglandb3b94fa2006-01-16 16:50:04 +00001331 return do_flock(file, cmd, fl);
Steven Whitehoused00223f2006-10-02 10:28:05 -04001332 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001333}
1334
Christoph Hellwig10d21982009-04-07 19:42:17 +02001335const struct file_operations gfs2_file_fops = {
Steven Whitehouse26c1a572006-09-04 15:32:10 -04001336 .llseek = gfs2_llseek,
Andreas Gruenbacher967bcc92018-06-19 15:08:02 +01001337 .read_iter = gfs2_file_read_iter,
Al Viroda56e452014-04-03 14:11:01 -04001338 .write_iter = gfs2_file_write_iter,
Christoph Hellwig81214ba2018-12-04 11:12:08 -07001339 .iopoll = iomap_dio_iopoll,
Steven Whitehouse26c1a572006-09-04 15:32:10 -04001340 .unlocked_ioctl = gfs2_ioctl,
Arnd Bergmann8d098072019-06-03 13:40:01 +02001341 .compat_ioctl = gfs2_compat_ioctl,
Steven Whitehouse26c1a572006-09-04 15:32:10 -04001342 .mmap = gfs2_mmap,
1343 .open = gfs2_open,
Bob Petersondf3fd112012-04-11 12:56:41 -04001344 .release = gfs2_release,
Steven Whitehouse26c1a572006-09-04 15:32:10 -04001345 .fsync = gfs2_fsync,
1346 .lock = gfs2_lock,
Steven Whitehouse26c1a572006-09-04 15:32:10 -04001347 .flock = gfs2_flock,
Al Viro82c156f2016-09-22 23:35:42 -04001348 .splice_read = generic_file_splice_read,
Bob Petersonf42a69f2015-02-19 08:02:16 -06001349 .splice_write = gfs2_file_splice_write,
Jeff Layton1c994a02014-08-27 06:49:41 -04001350 .setlease = simple_nosetlease,
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01001351 .fallocate = gfs2_fallocate,
David Teiglandb3b94fa2006-01-16 16:50:04 +00001352};
1353
Christoph Hellwig10d21982009-04-07 19:42:17 +02001354const struct file_operations gfs2_dir_fops = {
Al Viro1d1bb232016-05-12 17:00:20 -04001355 .iterate_shared = gfs2_readdir,
Steven Whitehouse26c1a572006-09-04 15:32:10 -04001356 .unlocked_ioctl = gfs2_ioctl,
Arnd Bergmann8d098072019-06-03 13:40:01 +02001357 .compat_ioctl = gfs2_compat_ioctl,
Steven Whitehouse26c1a572006-09-04 15:32:10 -04001358 .open = gfs2_open,
Bob Petersondf3fd112012-04-11 12:56:41 -04001359 .release = gfs2_release,
Steven Whitehouse26c1a572006-09-04 15:32:10 -04001360 .fsync = gfs2_fsync,
1361 .lock = gfs2_lock,
1362 .flock = gfs2_flock,
Arnd Bergmann6038f372010-08-15 18:52:59 +02001363 .llseek = default_llseek,
David Teiglandb3b94fa2006-01-16 16:50:04 +00001364};
1365
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001366#endif /* CONFIG_GFS2_FS_LOCKING_DLM */
1367
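/*
 * The _nolock variants are used with the lock_nolock (single node) locking
 * protocol.  No ->lock or ->flock methods are provided, so the VFS falls
 * back to ordinary local locking, and leases can be handed out via
 * generic_setlease.
 */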
Christoph Hellwig10d21982009-04-07 19:42:17 +02001368const struct file_operations gfs2_file_fops_nolock = {
Wendy Chengc97bfe42007-11-29 17:56:51 -05001369 .llseek = gfs2_llseek,
Andreas Gruenbacher967bcc92018-06-19 15:08:02 +01001370 .read_iter = gfs2_file_read_iter,
Al Viroda56e452014-04-03 14:11:01 -04001371 .write_iter = gfs2_file_write_iter,
Christoph Hellwig81214ba2018-12-04 11:12:08 -07001372 .iopoll = iomap_dio_iopoll,
Wendy Chengc97bfe42007-11-29 17:56:51 -05001373 .unlocked_ioctl = gfs2_ioctl,
Arnd Bergmann8d098072019-06-03 13:40:01 +02001374 .compat_ioctl = gfs2_compat_ioctl,
Wendy Chengc97bfe42007-11-29 17:56:51 -05001375 .mmap = gfs2_mmap,
1376 .open = gfs2_open,
Bob Petersondf3fd112012-04-11 12:56:41 -04001377 .release = gfs2_release,
Wendy Chengc97bfe42007-11-29 17:56:51 -05001378 .fsync = gfs2_fsync,
Al Viro82c156f2016-09-22 23:35:42 -04001379 .splice_read = generic_file_splice_read,
Bob Petersonf42a69f2015-02-19 08:02:16 -06001380 .splice_write = gfs2_file_splice_write,
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001381 .setlease = generic_setlease,
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01001382 .fallocate = gfs2_fallocate,
Wendy Chengc97bfe42007-11-29 17:56:51 -05001383};
1384
Christoph Hellwig10d21982009-04-07 19:42:17 +02001385const struct file_operations gfs2_dir_fops_nolock = {
Al Viro1d1bb232016-05-12 17:00:20 -04001386 .iterate_shared = gfs2_readdir,
Wendy Chengc97bfe42007-11-29 17:56:51 -05001387 .unlocked_ioctl = gfs2_ioctl,
Arnd Bergmann8d098072019-06-03 13:40:01 +02001388 .compat_ioctl = gfs2_compat_ioctl,
Wendy Chengc97bfe42007-11-29 17:56:51 -05001389 .open = gfs2_open,
Bob Petersondf3fd112012-04-11 12:56:41 -04001390 .release = gfs2_release,
Wendy Chengc97bfe42007-11-29 17:56:51 -05001391 .fsync = gfs2_fsync,
Arnd Bergmann6038f372010-08-15 18:52:59 +02001392 .llseek = default_llseek,
Wendy Chengc97bfe42007-11-29 17:56:51 -05001393};
1394