// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/delay.h>
#include <linux/backing-dev.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "aops.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (whence) {
	case SEEK_END:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek(file, offset, whence);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;

	case SEEK_DATA:
		error = gfs2_seek_data(file, offset);
		break;

	case SEEK_HOLE:
		error = gfs2_seek_hole(file, offset);
		break;

	case SEEK_CUR:
	case SEEK_SET:
		/*
		 * These don't reference inode->i_size and don't depend on the
		 * block mapping, so we don't need the glock.
		 */
		error = generic_file_llseek(file, offset, whence);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}
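
/*
 * Illustrative note (not from the original source): the SEEK_END glock
 * matters because i_size can change on another cluster node.  A sketch of
 * the race, with a hypothetical two-node cluster:
 *
 *	node A:	pwrite(fd, buf, 4096, old_size);	grows the file
 *	node B:	lseek(fd, 0, SEEK_END);
 *
 * Holding the shared glock forces node B's cached inode to be refreshed
 * before generic_file_llseek() reads i_size, so the seek lands at the new
 * end of file rather than a stale one.
 */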

/**
 * gfs2_readdir - Iterator for a directory
 * @file: The directory to read from
 * @ctx: What to feed directory entries to
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return error;

	error = gfs2_dir_read(dir, ctx, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	return error;
}

/**
 * fsflag_gfs2flag
 *
 * The FS_JOURNAL_DATA_FL flag maps to GFS2_DIF_INHERIT_JDATA for directories,
 * and to GFS2_DIF_JDATA for non-directories.
 */
static struct {
	u32 fsflag;
	u32 gfsflag;
} fsflag_gfs2flag[] = {
	{FS_SYNC_FL, GFS2_DIF_SYNC},
	{FS_IMMUTABLE_FL, GFS2_DIF_IMMUTABLE},
	{FS_APPEND_FL, GFS2_DIF_APPENDONLY},
	{FS_NOATIME_FL, GFS2_DIF_NOATIME},
	{FS_INDEX_FL, GFS2_DIF_EXHASH},
	{FS_TOPDIR_FL, GFS2_DIF_TOPDIR},
	{FS_JOURNAL_DATA_FL, GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA},
};
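
/*
 * Usage sketch (userspace, not part of this file): the table above is what
 * lets generic FS_IOC_GETFLAGS/FS_IOC_SETFLAGS callers, such as lsattr and
 * chattr, work on gfs2 without knowing the on-disk GFS2_DIF_* bits:
 *
 *	unsigned int attr;
 *	ioctl(fd, FS_IOC_GETFLAGS, &attr);
 *	attr |= FS_SYNC_FL;		stored on disk as GFS2_DIF_SYNC
 *	ioctl(fd, FS_IOC_SETFLAGS, &attr);
 */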

static inline u32 gfs2_gfsflags_to_fsflags(struct inode *inode, u32 gfsflags)
{
	int i;
	u32 fsflags = 0;

	if (S_ISDIR(inode->i_mode))
		gfsflags &= ~GFS2_DIF_JDATA;
	else
		gfsflags &= ~GFS2_DIF_INHERIT_JDATA;

	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++)
		if (gfsflags & fsflag_gfs2flag[i].gfsflag)
			fsflags |= fsflag_gfs2flag[i].fsflag;
	return fsflags;
}

static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		goto out_uninit;

	fsflags = gfs2_gfsflags_to_fsflags(inode, ip->i_diskflags);

	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return error;
}

void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_TOPDIR|			\
			     GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 * @fsflags: The FS_* inode flags passed in
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask,
			     const u32 fsflags)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags, oldflags;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	oldflags = gfs2_gfsflags_to_fsflags(inode, ip->i_diskflags);
	error = vfs_ioc_setflags_prepare(inode, oldflags, fsflags);
	if (error)
		goto out;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl,
				       GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_SET_FLAGS);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_ordered_del_inode(ip);
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	inode->i_ctime = current_time(inode);
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}

static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	u32 fsflags, gfsflags = 0;
	u32 mask;
	int i;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++) {
		if (fsflags & fsflag_gfs2flag[i].fsflag) {
			fsflags &= ~fsflag_gfs2flag[i].fsflag;
			gfsflags |= fsflag_gfs2flag[i].gfsflag;
		}
	}
	if (fsflags || gfsflags & ~GFS2_FLAGS_USER_SET)
		return -EINVAL;

	mask = GFS2_FLAGS_USER_SET;
	if (S_ISDIR(inode->i_mode)) {
		mask &= ~GFS2_DIF_JDATA;
	} else {
		/* The GFS2_DIF_TOPDIR flag is only valid for directories. */
		if (gfsflags & GFS2_DIF_TOPDIR)
			return -EINVAL;
		mask &= ~(GFS2_DIF_TOPDIR | GFS2_DIF_INHERIT_JDATA);
	}

	return do_gfs2_set_flags(filp, gfsflags, mask, fsflags);
}

static int gfs2_getlabel(struct file *filp, char __user *label)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (copy_to_user(label, sdp->sd_sb.sb_locktable, GFS2_LOCKNAME_LEN))
		return -EFAULT;

	return 0;
}
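
/*
 * Usage sketch (userspace, hypothetical): the label reported here is the
 * lock table name from the superblock, e.g. "mycluster:myfs":
 *
 *	char label[GFS2_LOCKNAME_LEN + 1] = "";
 *	ioctl(fd, FS_IOC_GETFSLABEL, label);
 */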

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
	case FS_IOC_GETFSLABEL:
		return gfs2_getlabel(filp, (char __user *)arg);
	}

	return -ENOTTY;
}

#ifdef CONFIG_COMPAT
static long gfs2_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	/* These are just misnamed, they actually get/put from/to user an int */
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	/* Keep this list in sync with gfs2_ioctl */
	case FITRIM:
	case FS_IOC_GETFSLABEL:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return gfs2_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define gfs2_compat_ioctl NULL
#endif

/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */

static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
	struct inode *inode = file_inode(filep);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

	if (hint > atomic_read(&ip->i_sizehint))
		atomic_set(&ip->i_sizehint, hint);
}
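
/*
 * Worked example (illustrative, assuming a 4096-byte block size): a 1 MiB
 * write gives blks = (1048576 + 4095) >> 12 = 256, so i_sizehint is raised
 * to at least 256 blocks.  The hint only ever grows in this function, so
 * concurrent writers end up keeping the largest value seen.
 */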

/**
 * gfs2_allocate_page_backing - Allocate blocks for a write fault
 * @page: The (locked) page to allocate backing for
 * @length: Size of the allocation
 *
 * We try to allocate all the blocks required for the page in one go.  This
 * might fail for various reasons, so we keep trying until all the blocks to
 * back this page are allocated.  If some of the blocks are already allocated,
 * that is ok too.
 */
static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
{
	u64 pos = page_offset(page);

	do {
		struct iomap iomap = { };

		if (gfs2_iomap_get_alloc(page->mapping->host, pos, length, &iomap))
			return -EIO;

		if (length < iomap.length)
			iomap.length = length;
		length -= iomap.length;
		pos += iomap.length;
	} while (length > 0);

	return 0;
}

/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	u64 offset = page_offset(page);
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	unsigned int length;
	loff_t size;
	int ret;

	sb_start_pagefault(inode->i_sb);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	/* Check page index against inode size */
	size = i_size_read(inode);
	if (offset >= size) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Update file times before taking page lock */
	file_update_time(vmf->vma->vm_file);

	/* page is wholly or partially inside EOF */
	if (offset > size - PAGE_SIZE)
		length = offset_in_page(size);
	else
		length = PAGE_SIZE;

	gfs2_size_hint(vmf->vma->vm_file, offset, length);

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	/*
	 * iomap_writepage / iomap_writepages currently don't support inline
	 * files, so always unstuff here.
	 */

	if (!gfs2_is_stuffed(ip) &&
	    !gfs2_write_alloc_required(ip, offset, length)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
		goto out_unlock;
	}

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_unlock;

	gfs2_write_calc_reserv(ip, length, &data_blocks, &ind_blocks);
	ap.target = data_blocks + ind_blocks;
	ret = gfs2_quota_lock_check(ip, &ap);
	if (ret)
		goto out_unlock;
	ret = gfs2_inplace_reserve(ip, &ap);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EAGAIN;
	/* If truncated, we must retry the operation, as we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
	ret = 0;
	if (gfs2_is_stuffed(ip))
		ret = gfs2_unstuff_dinode(ip, page);
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page, length);

out_trans_end:
	if (ret)
		unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (ret == 0) {
		set_page_dirty(page);
		wait_for_stable_page(page);
	}
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}
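
/*
 * Illustrative trigger (userspace sketch, not part of this file): the
 * handler above runs the first time a store hits a clean page of a shared
 * writable mapping, e.g.:
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	p[0] = 1;		faults into gfs2_page_mkwrite()
 */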

static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = gfs2_page_mkwrite,
};

/**
 * gfs2_mmap - memory map a file
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}

/**
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
 *
 * This may be called under a glock or not, depending upon how it has
 * been called. We must always be called under a glock for regular
 * files, however. For other file types, it does not matter whether
 * we hold the glock or not.
 *
 * Returns: Error code or 0 for success
 */

int gfs2_open_common(struct inode *inode, struct file *file)
{
	struct gfs2_file *fp;
	int ret;

	if (S_ISREG(inode->i_mode)) {
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;
	}

	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;
	if (file->f_mode & FMODE_WRITE) {
		ret = gfs2_qa_get(GFS2_I(inode));
		if (ret)
			goto fail;
	}
	return 0;

fail:
	kfree(file->private_data);
	file->private_data = NULL;
	return ret;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that we have the file size uptodate for the large
 * file check which is in the common code. That is only an issue for
 * regular files though.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	int error;
	bool need_unlock = false;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		need_unlock = true;
	}

	error = gfs2_open_common(inode, file);

	if (need_unlock)
		gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if (file->f_mode & FMODE_WRITE) {
		gfs2_rs_delete(ip, &inode->i_writecount);
		gfs2_qa_put(ip);
	}
	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & I_DIRTY_ALL;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (!gfs2_is_jdata(ip))
		sync_state &= ~I_DIRTY_PAGES;
	if (datasync)
		sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME);

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			ret = file_write_and_wait(file);
		if (ret)
			return ret;
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = file_fdatawait_range(file, start, end);

	return ret ? ret : ret1;
}
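
/*
 * Ordering sketch (summarizing the function above): for data=writeback the
 * sequence is (1) start data writeback without waiting, (2) sync in-core
 * metadata and flush the glock's ail list, (3) wait for the data I/O
 * started in (1).  Waiting only at the end lets the data and metadata I/O
 * overlap; jdata files instead write and wait inline in step (2).
 */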

static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	size_t count = iov_iter_count(to);
	struct gfs2_holder gh;
	ssize_t ret;

	if (!count)
		return 0; /* skip atime */

	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL,
			   is_sync_kiocb(iocb));

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return ret;
}

static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t len = iov_iter_count(from);
	loff_t offset = iocb->ki_pos;
	struct gfs2_holder gh;
	ssize_t ret;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation on
	 * this path. All we need to change is the atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like the
	 * VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	/* Silently fall back to buffered I/O when writing beyond EOF */
	if (offset + len > i_size_read(&ip->i_inode))
		goto out;

	ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
			   is_sync_kiocb(iocb));

out:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return ret;
}
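
/*
 * Note (describing the code above): returning 0 from the "out" label with
 * the iov_iter unconsumed is deliberate.  gfs2_file_write_iter() below
 * checks iov_iter_count() after calling this function and completes any
 * unwritten remainder through the buffered path, which is how writes that
 * extend i_size silently fall back from O_DIRECT.
 */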

static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = gfs2_file_direct_read(iocb, to);
		if (likely(ret != -ENOTBLK))
			return ret;
		iocb->ki_flags &= ~IOCB_DIRECT;
	}
	return generic_file_read_iter(iocb, to);
}

/**
 * gfs2_file_write_iter - Perform a write to a file
 * @iocb: The io context
 * @from: The data to write
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
	ssize_t ret;

	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));

	if (iocb->ki_flags & IOCB_APPEND) {
		struct gfs2_holder gh;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out_unlock;

	ret = file_remove_privs(file);
	if (ret)
		goto out_unlock;

	ret = file_update_time(file);
	if (ret)
		goto out_unlock;

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = file->f_mapping;
		ssize_t buffered, ret2;

		ret = gfs2_file_direct_write(iocb, from);
		if (ret < 0 || !iov_iter_count(from))
			goto out_unlock;

		iocb->ki_flags |= IOCB_DSYNC;
		current->backing_dev_info = inode_to_bdi(inode);
		buffered = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
		current->backing_dev_info = NULL;
		if (unlikely(buffered <= 0))
			goto out_unlock;

		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics. If the writeback or invalidate fails, only report
		 * the direct I/O range as we don't know if the buffered pages
		 * made it to disk.
		 */
		iocb->ki_pos += buffered;
		ret2 = generic_write_sync(iocb, buffered);
		invalidate_mapping_pages(mapping,
				(iocb->ki_pos - buffered) >> PAGE_SHIFT,
				(iocb->ki_pos - 1) >> PAGE_SHIFT);
		if (!ret || ret2 > 0)
			ret += ret2;
	} else {
		current->backing_dev_info = inode_to_bdi(inode);
		ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
		current->backing_dev_info = NULL;
		if (likely(ret > 0)) {
			iocb->ki_pos += ret;
			ret = generic_write_sync(iocb, ret);
		}
	}

out_unlock:
	inode_unlock(inode);
	return ret;
}

static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t end = offset + len;
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	while (offset < end) {
		struct iomap iomap = { };

		error = gfs2_iomap_get_alloc(inode, offset, end - offset,
					     &iomap);
		if (error)
			goto out;
		offset = iomap.offset + iomap.length;
		if (!(iomap.flags & IOMAP_F_NEW))
			continue;
		error = sb_issue_zeroout(sb, iomap.addr >> inode->i_blkbits,
					 iomap.length >> inode->i_blkbits,
					 GFP_NOFS);
		if (error) {
			fs_err(GFS2_SB(inode), "Failed to zero data buffers\n");
			goto out;
		}
	}
out:
	brelse(dibh);
	return error;
}

/**
 * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
 *                     blocks, determine how many bytes can be written.
 * @ip:          The inode in question.
 * @len:         Max cap of bytes. What we return in *len must be <= this.
 * @data_blocks: Compute and return the number of data blocks needed
 * @ind_blocks:  Compute and return the number of indirect blocks needed
 * @max_blocks:  The total blocks available to work with.
 *
 * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
 */
static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks,
			    unsigned int max_blocks)
{
	loff_t max = *len;
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}

	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}
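
/*
 * Worked example (illustrative; assumes a typical 4 KiB-block geometry
 * where sd_inptrs == 509 and sd_diptrs == 483): once the height adjustment
 * has left max_data at, say, 10000, the loop above charges
 * DIV_ROUND_UP(10000, 509) = 20 blocks to indirection, leaving
 * *data_blocks = 9980 (the rest of max_blocks is counted as indirect), so
 * *len becomes (9980 - 3) << 12 bytes, roughly 39 MiB of writable data.
 */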

static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes, max_blks;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;

	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_size_hint(file, offset, len);

	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.min_target = data_blocks + ind_blocks;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}

		/* We need to determine how many bytes we can actually
		 * fallocate without exceeding quota or going over the
		 * end of the fs. We start off optimistically by assuming
		 * we can write max_bytes */
		max_bytes = (len > max_chunk_size) ? max_chunk_size : len;

		/* Since max_bytes is most likely a theoretical max, we
		 * calculate a more realistic 'bytes' to serve as a good
		 * starting point for the number of bytes we may be able
		 * to write */
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
		ap.target = data_blocks + ind_blocks;

		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			return error;
		/* ap.allowed tells us how many blocks quota will allow
		 * us to write. Check if this reduces max_blks */
		max_blks = UINT_MAX;
		if (ap.allowed)
			max_blks = ap.allowed;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;

		/* check if the selected rgrp limits our max_blks further */
		if (ap.allowed && ap.allowed < max_blks)
			max_blks = ap.allowed;

		/* Almost done. Calculate bytes that can be written using
		 * max_blks. We also recompute max_bytes, data_blocks and
		 * ind_blocks */
		calc_max_reserv(ip, &max_bytes, &data_blocks,
				&ind_blocks, max_blks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_SIZE >> inode->i_blkbits);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size)
		i_size_write(inode, pos + count);
	file_update_time(file);
	mark_inode_dirty(inode);

	if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
		return vfs_fsync_range(file, pos, pos + count - 1,
			       (file->f_flags & __O_SYNC) ? 0 : 1);
	return 0;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
	return error;
}
1117
1118static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
1119{
1120 struct inode *inode = file_inode(file);
Andrew Priced4d7fc12017-04-05 11:45:26 -04001121 struct gfs2_sbd *sdp = GFS2_SB(inode);
Andrew Price9c9f1152014-11-12 17:24:03 +00001122 struct gfs2_inode *ip = GFS2_I(inode);
1123 struct gfs2_holder gh;
1124 int ret;
1125
Andreas Gruenbacher4e56a642017-12-14 17:11:03 +01001126 if (mode & ~(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE))
Andrew Priced4d7fc12017-04-05 11:45:26 -04001127 return -EOPNOTSUPP;
1128 /* fallocate is needed by gfs2_grow to reserve space in the rindex */
1129 if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
Andrew Price9c9f1152014-11-12 17:24:03 +00001130 return -EOPNOTSUPP;
1131
Al Viro59551022016-01-22 15:40:57 -05001132 inode_lock(inode);
Andrew Price9c9f1152014-11-12 17:24:03 +00001133
1134 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
1135 ret = gfs2_glock_nq(&gh);
1136 if (ret)
1137 goto out_uninit;
1138
1139 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
1140 (offset + len) > inode->i_size) {
1141 ret = inode_newsize_ok(inode, offset + len);
1142 if (ret)
1143 goto out_unlock;
1144 }
1145
1146 ret = get_write_access(inode);
1147 if (ret)
1148 goto out_unlock;
1149
Andreas Gruenbacher4e56a642017-12-14 17:11:03 +01001150 if (mode & FALLOC_FL_PUNCH_HOLE) {
1151 ret = __gfs2_punch_hole(file, offset, len);
1152 } else {
Andreas Gruenbacher4e56a642017-12-14 17:11:03 +01001153 ret = __gfs2_fallocate(file, mode, offset, len);
Andreas Gruenbacher4e56a642017-12-14 17:11:03 +01001154 if (ret)
1155 gfs2_rs_deltree(&ip->i_res);
1156 }
Bob Petersona097dc7e2015-07-16 08:28:04 -05001157
Andrew Price9c9f1152014-11-12 17:24:03 +00001158 put_write_access(inode);
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01001159out_unlock:
Bob Petersona0846a52014-02-06 10:43:50 -05001160 gfs2_glock_dq(&gh);
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01001161out_uninit:
Bob Petersona0846a52014-02-06 10:43:50 -05001162 gfs2_holder_uninit(&gh);
Al Viro59551022016-01-22 15:40:57 -05001163 inode_unlock(inode);
Andrew Price9c9f1152014-11-12 17:24:03 +00001164 return ret;
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01001165}
1166
static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *out, loff_t *ppos,
				      size_t len, unsigned int flags)
{
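	/* Hint the expected write size so block reservations can be sized */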
	gfs2_size_hint(out, *ppos, len);

	return iter_file_splice_write(pipe, out, ppos, len, flags);
}

#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_lock - acquire/release a POSIX lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		/* Hack: treat a lock cancellation request as an unlock */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
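	/*
	 * If the filesystem has been withdrawn, still let an unlock release
	 * local lock state, but fail everything else with -EIO.
	 */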
	if (unlikely(gfs2_withdrawn(sdp))) {
		if (fl->fl_type == F_UNLCK)
			locks_lock_file_wait(file, fl);
		return -EIO;
	}
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}

static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	struct gfs2_glock *gl;
	unsigned int state;
	u16 flags;
	int error = 0;
	int sleeptime;

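	/*
	 * Write locks map to an exclusive glock, read locks to a shared
	 * one; non-blocking requests are issued as "try" locks.
	 */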
	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;

	mutex_lock(&fp->f_fl_mutex);

	if (gfs2_holder_initialized(fl_gh)) {
		struct file_lock request;
		if (fl_gh->gh_state == state)
			goto out;
		locks_init_lock(&request);
		request.fl_type = F_UNLCK;
		request.fl_flags = FL_FLOCK;
		locks_lock_file_wait(file, &request);
		gfs2_glock_dq(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
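	/*
	 * Retry a failed try lock a few times, with increasing delay,
	 * before giving up and returning -EAGAIN.
	 */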
	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
		error = gfs2_glock_nq(fl_gh);
		if (error != GLR_TRYFAILED)
			break;
		fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
		fl_gh->gh_error = 0;
		msleep(sleeptime);
	}
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = locks_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}

static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

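	/* Release the VFS-level lock first, then the glock if one is held */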
	mutex_lock(&fp->f_fl_mutex);
	locks_lock_file_wait(file, fl);
	if (gfs2_holder_initialized(fl_gh)) {
		gfs2_glock_dq(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	}
	return do_flock(file, cmd, fl);
}

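/* File and directory operations for mounts using the DLM lock manager */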
const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read_iter	= gfs2_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= simple_nosetlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

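/*
 * Variants for local (lock_nolock) mounts: no cluster-aware lock or
 * flock methods are needed, and leases can be granted locally.
 */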
const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read_iter	= gfs2_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};