// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_rtalloc.h"
#include "xfs_iwalk.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_attr.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_fsops.h"
#include "xfs_discard.h"
#include "xfs_quota.h"
#include "xfs_export.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_acl.h"
#include "xfs_btree.h"
#include <linux/fsmap.h>
#include "xfs_fsmap.h"
#include "scrub/xfs_scrub.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_health.h"

#include <linux/mount.h>
#include <linux/namei.h>

/*
 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
 * a file or fs handle.
 *
 * XFS_IOC_PATH_TO_FSHANDLE
 *    returns fs handle for a mount point or path within that mount point
 * XFS_IOC_FD_TO_HANDLE
 *    returns full handle for a FD opened in user space
 * XFS_IOC_PATH_TO_HANDLE
 *    returns full handle for a path
 */
int
xfs_find_handle(
	unsigned int		cmd,
	xfs_fsop_handlereq_t	*hreq)
{
	int			hsize;
	xfs_handle_t		handle;
	struct inode		*inode;
	struct fd		f = {NULL};
	struct path		path;
	int			error;
	struct xfs_inode	*ip;

	if (cmd == XFS_IOC_FD_TO_HANDLE) {
		f = fdget(hreq->fd);
		if (!f.file)
			return -EBADF;
		inode = file_inode(f.file);
	} else {
		error = user_path_at(AT_FDCWD, hreq->path, 0, &path);
		if (error)
			return error;
		inode = d_inode(path.dentry);
	}
	ip = XFS_I(inode);

	/*
	 * We can only generate handles for inodes residing on a XFS filesystem,
	 * and only for regular files, directories or symbolic links.
	 */
	error = -EINVAL;
	if (inode->i_sb->s_magic != XFS_SB_MAGIC)
		goto out_put;

	error = -EBADF;
	if (!S_ISREG(inode->i_mode) &&
	    !S_ISDIR(inode->i_mode) &&
	    !S_ISLNK(inode->i_mode))
		goto out_put;


	memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t));

	if (cmd == XFS_IOC_PATH_TO_FSHANDLE) {
		/*
		 * This handle only contains an fsid, zero the rest.
		 */
		memset(&handle.ha_fid, 0, sizeof(handle.ha_fid));
		hsize = sizeof(xfs_fsid_t);
	} else {
		handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
					sizeof(handle.ha_fid.fid_len);
		handle.ha_fid.fid_pad = 0;
		handle.ha_fid.fid_gen = inode->i_generation;
		handle.ha_fid.fid_ino = ip->i_ino;
		hsize = sizeof(xfs_handle_t);
	}

	error = -EFAULT;
	if (copy_to_user(hreq->ohandle, &handle, hsize) ||
	    copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
		goto out_put;

	error = 0;

 out_put:
	if (cmd == XFS_IOC_FD_TO_HANDLE)
		fdput(f);
	else
		path_put(&path);
	return error;
}
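
/*
 * Example (userspace sketch, not part of this file): a rough illustration of
 * how the handle ioctls above are typically driven.  Real callers normally
 * use libhandle's path_to_fshandle()/path_to_handle(); the raw ioctl,
 * assuming the xfsprogs headers and a hypothetical path, would look roughly
 * like this:
 *
 *	xfs_handle_t		handle;
 *	__u32			hlen = sizeof(handle);
 *	xfs_fsop_handlereq_t	hreq = {
 *		.path		= (void *)"/mnt/scratch/testfile",
 *		.ohandle	= &handle,
 *		.ohandlen	= &hlen,
 *	};
 *	int			fd = open("/mnt/scratch", O_RDONLY);
 *
 *	if (ioctl(fd, XFS_IOC_PATH_TO_HANDLE, &hreq) < 0)
 *		perror("XFS_IOC_PATH_TO_HANDLE");
 */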

/*
 * No need to do permission checks on the various pathname components
 * as the handle operations are privileged.
 */
STATIC int
xfs_handle_acceptable(
	void			*context,
	struct dentry		*dentry)
{
	return 1;
}

/*
 * Convert userspace handle data into a dentry.
 */
struct dentry *
xfs_handle_to_dentry(
	struct file		*parfilp,
	void __user		*uhandle,
	u32			hlen)
{
	xfs_handle_t		handle;
	struct xfs_fid64	fid;

	/*
	 * Only allow handle opens under a directory.
	 */
	if (!S_ISDIR(file_inode(parfilp)->i_mode))
		return ERR_PTR(-ENOTDIR);

	if (hlen != sizeof(xfs_handle_t))
		return ERR_PTR(-EINVAL);
	if (copy_from_user(&handle, uhandle, hlen))
		return ERR_PTR(-EFAULT);
	if (handle.ha_fid.fid_len !=
	    sizeof(handle.ha_fid) - sizeof(handle.ha_fid.fid_len))
		return ERR_PTR(-EINVAL);

	memset(&fid, 0, sizeof(struct fid));
	fid.ino = handle.ha_fid.fid_ino;
	fid.gen = handle.ha_fid.fid_gen;

	return exportfs_decode_fh(parfilp->f_path.mnt, (struct fid *)&fid, 3,
			FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG,
			xfs_handle_acceptable, NULL);
}

STATIC struct dentry *
xfs_handlereq_to_dentry(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen);
}

int
xfs_open_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	const struct cred	*cred = current_cred();
	int			error;
	int			fd;
	int			permflag;
	struct file		*filp;
	struct inode		*inode;
	struct dentry		*dentry;
	fmode_t			fmode;
	struct path		path;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	inode = d_inode(dentry);

	/* Restrict xfs_open_by_handle to directories & regular files. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		error = -EPERM;
		goto out_dput;
	}

#if BITS_PER_LONG != 32
	hreq->oflags |= O_LARGEFILE;
#endif

	permflag = hreq->oflags;
	fmode = OPEN_FMODE(permflag);
	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
	    (fmode & FMODE_WRITE) && IS_APPEND(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	/* Can't write directories. */
	if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) {
		error = -EISDIR;
		goto out_dput;
	}

	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		error = fd;
		goto out_dput;
	}

	path.mnt = parfilp->f_path.mnt;
	path.dentry = dentry;
	filp = dentry_open(&path, hreq->oflags, cred);
	dput(dentry);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);
		return PTR_ERR(filp);
	}

	if (S_ISREG(inode->i_mode)) {
		filp->f_flags |= O_NOATIME;
		filp->f_mode |= FMODE_NOCMTIME;
	}

	fd_install(fd, filp);
	return fd;

 out_dput:
	dput(dentry);
	return error;
}
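
/*
 * Example (userspace sketch, not part of this file): reopening a file from a
 * handle previously produced by XFS_IOC_PATH_TO_HANDLE.  The ioctl is issued
 * against an open directory fd on the same filesystem and, as checked above,
 * requires CAP_SYS_ADMIN; on success the return value is a new file
 * descriptor.  Callers normally use libhandle's open_by_handle(); the raw
 * form would be roughly:
 *
 *	xfs_fsop_handlereq_t	hreq = {
 *		.ihandle	= &handle,
 *		.ihandlen	= sizeof(handle),
 *		.oflags		= O_RDONLY,
 *	};
 *	int newfd = ioctl(mountfd, XFS_IOC_OPEN_BY_HANDLE, &hreq);
 */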

int
xfs_readlink_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	struct dentry		*dentry;
	__u32			olen;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* Restrict this handle operation to symlinks only. */
	if (!d_is_symlink(dentry)) {
		error = -EINVAL;
		goto out_dput;
	}

	if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) {
		error = -EFAULT;
		goto out_dput;
	}

	error = vfs_readlink(dentry, hreq->ohandle, olen);

 out_dput:
	dput(dentry);
	return error;
}

int
xfs_set_dmattrs(
	xfs_inode_t	*ip,
	uint		evmask,
	uint16_t	state)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_trans_t	*tp;
	int		error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	ip->i_d.di_dmevmask = evmask;
	ip->i_d.di_dmstate = state;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_trans_commit(tp);

	return error;
}

STATIC int
xfs_fssetdm_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	struct fsdmidata	fsd;
	xfs_fsop_setdm_handlereq_t dmhreq;
	struct dentry		*dentry;

	if (!capable(CAP_MKNOD))
		return -EPERM;
	if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
		return -EFAULT;

	error = mnt_want_write_file(parfilp);
	if (error)
		return error;

	dentry = xfs_handlereq_to_dentry(parfilp, &dmhreq.hreq);
	if (IS_ERR(dentry)) {
		mnt_drop_write_file(parfilp);
		return PTR_ERR(dentry);
	}

	if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) {
		error = -EPERM;
		goto out;
	}

	if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
		error = -EFAULT;
		goto out;
	}

	error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask,
				 fsd.fsd_dmstate);

 out:
	mnt_drop_write_file(parfilp);
	dput(dentry);
	return error;
}

STATIC int
xfs_attrlist_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error = -ENOMEM;
	attrlist_cursor_kern_t	*cursor;
	struct xfs_fsop_attrlist_handlereq __user *p = arg;
	xfs_fsop_attrlist_handlereq_t al_hreq;
	struct dentry		*dentry;
	char			*kbuf;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
		return -EFAULT;
	if (al_hreq.buflen < sizeof(struct attrlist) ||
	    al_hreq.buflen > XFS_XATTR_LIST_MAX)
		return -EINVAL;

	/*
	 * Reject flags, only allow namespaces.
	 */
	if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
		return -EINVAL;

	dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP);
	if (!kbuf)
		goto out_dput;

	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
	error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen,
					al_hreq.flags, cursor);
	if (error)
		goto out_kfree;

	if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
		error = -EFAULT;
		goto out_kfree;
	}

	if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
		error = -EFAULT;

out_kfree:
	kmem_free(kbuf);
out_dput:
	dput(dentry);
	return error;
}

int
xfs_attrmulti_attr_get(
	struct inode		*inode,
	unsigned char		*name,
	unsigned char		__user *ubuf,
	uint32_t		*len,
	uint32_t		flags)
{
	unsigned char		*kbuf;
	int			error = -EFAULT;

	if (*len > XFS_XATTR_SIZE_MAX)
		return -EINVAL;
	kbuf = kmem_zalloc_large(*len, KM_SLEEP);
	if (!kbuf)
		return -ENOMEM;

	error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags);
	if (error)
		goto out_kfree;

	if (copy_to_user(ubuf, kbuf, *len))
		error = -EFAULT;

out_kfree:
	kmem_free(kbuf);
	return error;
}

int
xfs_attrmulti_attr_set(
	struct inode		*inode,
	unsigned char		*name,
	const unsigned char	__user *ubuf,
	uint32_t		len,
	uint32_t		flags)
{
	unsigned char		*kbuf;
	int			error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;
	if (len > XFS_XATTR_SIZE_MAX)
		return -EINVAL;

	kbuf = memdup_user(ubuf, len);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags);
	if (!error)
		xfs_forget_acl(inode, name, flags);
	kfree(kbuf);
	return error;
}

int
xfs_attrmulti_attr_remove(
	struct inode		*inode,
	unsigned char		*name,
	uint32_t		flags)
{
	int			error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;
	error = xfs_attr_remove(XFS_I(inode), name, flags);
	if (!error)
		xfs_forget_acl(inode, name, flags);
	return error;
}

STATIC int
xfs_attrmulti_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	xfs_attr_multiop_t	*ops;
	xfs_fsop_attrmulti_handlereq_t am_hreq;
	struct dentry		*dentry;
	unsigned int		i, size;
	unsigned char		*attr_name;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
		return -EFAULT;

	/* overflow check */
	if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t))
		return -E2BIG;

	dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = -E2BIG;
	size = am_hreq.opcount * sizeof(xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_dput;

	ops = memdup_user(am_hreq.ops, size);
	if (IS_ERR(ops)) {
		error = PTR_ERR(ops);
		goto out_dput;
	}

	error = -ENOMEM;
	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
	if (!attr_name)
		goto out_kfree_ops;

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = strncpy_from_user((char *)attr_name,
				ops[i].am_attrname, MAXNAMELEN);
		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
			error = -ERANGE;
		if (ops[i].am_error < 0)
			break;

		switch (ops[i].am_opcode) {
		case ATTR_OP_GET:
			ops[i].am_error = xfs_attrmulti_attr_get(
					d_inode(dentry), attr_name,
					ops[i].am_attrvalue, &ops[i].am_length,
					ops[i].am_flags);
			break;
		case ATTR_OP_SET:
			ops[i].am_error = mnt_want_write_file(parfilp);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_set(
					d_inode(dentry), attr_name,
					ops[i].am_attrvalue, ops[i].am_length,
					ops[i].am_flags);
			mnt_drop_write_file(parfilp);
			break;
		case ATTR_OP_REMOVE:
			ops[i].am_error = mnt_want_write_file(parfilp);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_remove(
					d_inode(dentry), attr_name,
					ops[i].am_flags);
			mnt_drop_write_file(parfilp);
			break;
		default:
			ops[i].am_error = -EINVAL;
		}
	}

	if (copy_to_user(am_hreq.ops, ops, size))
		error = -EFAULT;

	kfree(attr_name);
 out_kfree_ops:
	kfree(ops);
 out_dput:
	dput(dentry);
	return error;
}

int
xfs_ioc_space(
	struct file		*filp,
	unsigned int		cmd,
	xfs_flock64_t		*bf)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct iattr		iattr;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	int			error;

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
		return -EPERM;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (filp->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;
	if (filp->f_mode & FMODE_NOCMTIME)
		flags |= XFS_PREALLOC_INVISIBLE;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	switch (bf->l_whence) {
	case 0: /*SEEK_SET*/
		break;
	case 1: /*SEEK_CUR*/
		bf->l_start += filp->f_pos;
		break;
	case 2: /*SEEK_END*/
		bf->l_start += XFS_ISIZE(ip);
		break;
	default:
		error = -EINVAL;
		goto out_unlock;
	}

	/*
	 * length of <= 0 for resv/unresv/zero is invalid.  length for
	 * alloc/free is ignored completely and we have no idea what userspace
	 * might have set it to, so set it to zero to allow range
	 * checks to pass.
	 */
	switch (cmd) {
	case XFS_IOC_ZERO_RANGE:
	case XFS_IOC_RESVSP:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_UNRESVSP64:
		if (bf->l_len <= 0) {
			error = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		bf->l_len = 0;
		break;
	}

	if (bf->l_start < 0 ||
	    bf->l_start > inode->i_sb->s_maxbytes ||
	    bf->l_start + bf->l_len < 0 ||
	    bf->l_start + bf->l_len >= inode->i_sb->s_maxbytes) {
		error = -EINVAL;
		goto out_unlock;
	}

	switch (cmd) {
	case XFS_IOC_ZERO_RANGE:
		flags |= XFS_PREALLOC_SET;
		error = xfs_zero_file_space(ip, bf->l_start, bf->l_len);
		break;
	case XFS_IOC_RESVSP:
	case XFS_IOC_RESVSP64:
		flags |= XFS_PREALLOC_SET;
		error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len,
						XFS_BMAPI_PREALLOC);
		break;
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_UNRESVSP64:
		error = xfs_free_file_space(ip, bf->l_start, bf->l_len);
		break;
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP:
	case XFS_IOC_FREESP64:
		flags |= XFS_PREALLOC_CLEAR;
		if (bf->l_start > XFS_ISIZE(ip)) {
			error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
					bf->l_start - XFS_ISIZE(ip), 0);
			if (error)
				goto out_unlock;
		}

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = bf->l_start;

		error = xfs_vn_setattr_size(file_dentry(filp), &iattr);
		break;
	default:
		ASSERT(0);
		error = -EINVAL;
	}

	if (error)
		goto out_unlock;

	error = xfs_update_prealloc_flags(ip, flags);

out_unlock:
	xfs_iunlock(ip, iolock);
	mnt_drop_write_file(filp);
	return error;
}

/* Return 0 on success or positive error */
int
xfs_fsbulkstat_one_fmt(
	struct xfs_ibulk		*breq,
	const struct xfs_bulkstat	*bstat)
{
	struct xfs_bstat		bs1;

	xfs_bulkstat_to_bstat(breq->mp, &bs1, bstat);
	if (copy_to_user(breq->ubuffer, &bs1, sizeof(bs1)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_bstat));
}

int
xfs_fsinumbers_fmt(
	struct xfs_ibulk		*breq,
	const struct xfs_inumbers	*igrp)
{
	struct xfs_inogrp		ig1;

	xfs_inumbers_to_inogrp(&ig1, igrp);
	if (copy_to_user(breq->ubuffer, &ig1, sizeof(struct xfs_inogrp)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_inogrp));
}

STATIC int
xfs_ioc_fsbulkstat(
	xfs_mount_t		*mp,
	unsigned int		cmd,
	void			__user *arg)
{
	struct xfs_fsop_bulkreq	bulkreq;
	struct xfs_ibulk	breq = {
		.mp		= mp,
		.ocount		= 0,
	};
	xfs_ino_t		lastino;
	int			error;

	/* done = 1 if there are more stats to get and if bulkstat */
	/* should be called again (unused here, but used in dmapi) */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (copy_from_user(&bulkreq, arg, sizeof(struct xfs_fsop_bulkreq)))
		return -EFAULT;

	if (copy_from_user(&lastino, bulkreq.lastip, sizeof(__s64)))
		return -EFAULT;

	if (bulkreq.icount <= 0)
		return -EINVAL;

	if (bulkreq.ubuffer == NULL)
		return -EINVAL;

	breq.ubuffer = bulkreq.ubuffer;
	breq.icount = bulkreq.icount;

	/*
	 * FSBULKSTAT_SINGLE expects that *lastip contains the inode number
	 * that we want to stat.  However, FSINUMBERS and FSBULKSTAT expect
	 * that *lastip contains either zero or the number of the last inode to
	 * be examined by the previous call and return results starting with
	 * the next inode after that.  The new bulk request back end functions
	 * take the inode to start with, so we have to compute the startino
	 * parameter from lastino to maintain correct function.  lastino == 0
	 * is a special case because it has traditionally meant "first inode
	 * in filesystem".
	 */
	if (cmd == XFS_IOC_FSINUMBERS) {
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_inumbers(&breq, xfs_fsinumbers_fmt);
		lastino = breq.startino - 1;
	} else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE) {
		breq.startino = lastino;
		breq.icount = 1;
		error = xfs_bulkstat_one(&breq, xfs_fsbulkstat_one_fmt);
	} else {	/* XFS_IOC_FSBULKSTAT */
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_bulkstat(&breq, xfs_fsbulkstat_one_fmt);
		lastino = breq.startino - 1;
	}

	if (error)
		return error;

	if (bulkreq.lastip != NULL &&
	    copy_to_user(bulkreq.lastip, &lastino, sizeof(xfs_ino_t)))
		return -EFAULT;

	if (bulkreq.ocount != NULL &&
	    copy_to_user(bulkreq.ocount, &breq.ocount, sizeof(__s32)))
		return -EFAULT;

	return 0;
}
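
/*
 * Example (userspace sketch, not part of this file): the v1 bulkstat
 * protocol implemented above.  *lastip is both input ("continue after this
 * inode", with 0 meaning "start of filesystem") and output (the last inode
 * actually returned), which is why the handler converts it into a startino.
 * A rough iteration loop, assuming the xfsprogs headers and a hypothetical
 * fsfd opened on the mount point:
 *
 *	struct xfs_bstat	buf[64];
 *	__u64			last = 0;
 *	__s32			count = 0;
 *	struct xfs_fsop_bulkreq	req = {
 *		.lastip		= &last,
 *		.icount		= 64,
 *		.ubuffer	= buf,
 *		.ocount		= &count,
 *	};
 *
 *	while (ioctl(fsfd, XFS_IOC_FSBULKSTAT, &req) == 0 && count > 0)
 *		;	// consume count entries from buf, then reissue
 */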

/* Return 0 on success or positive error */
static int
xfs_bulkstat_fmt(
	struct xfs_ibulk		*breq,
	const struct xfs_bulkstat	*bstat)
{
	if (copy_to_user(breq->ubuffer, bstat, sizeof(struct xfs_bulkstat)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_bulkstat));
}

/*
 * Check the incoming bulk request @hdr from userspace and initialize the
 * internal @breq bulk request appropriately.  Returns 0 if the bulk request
 * should proceed; XFS_ITER_ABORT if there's nothing to do; or the usual
 * negative error code.
 */
static int
xfs_bulk_ireq_setup(
	struct xfs_mount	*mp,
	struct xfs_bulk_ireq	*hdr,
	struct xfs_ibulk	*breq,
	void __user		*ubuffer)
{
	if (hdr->icount == 0 ||
	    (hdr->flags & ~XFS_BULK_IREQ_FLAGS_ALL) ||
	    memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
		return -EINVAL;

	breq->startino = hdr->ino;
	breq->ubuffer = ubuffer;
	breq->icount = hdr->icount;
	breq->ocount = 0;
	breq->flags = 0;

	/*
	 * The @ino parameter is a special value, so we must look it up here.
	 * We're not allowed to have IREQ_AGNO, and we only return one inode
	 * worth of data.
	 */
	if (hdr->flags & XFS_BULK_IREQ_SPECIAL) {
		if (hdr->flags & XFS_BULK_IREQ_AGNO)
			return -EINVAL;

		switch (hdr->ino) {
		case XFS_BULK_IREQ_SPECIAL_ROOT:
			hdr->ino = mp->m_sb.sb_rootino;
			break;
		default:
			return -EINVAL;
		}
		breq->icount = 1;
	}

	/*
	 * The IREQ_AGNO flag means that we only want results from a given AG.
	 * If @hdr->ino is zero, we start iterating in that AG.  If @hdr->ino is
	 * beyond the specified AG then we return no results.
	 */
	if (hdr->flags & XFS_BULK_IREQ_AGNO) {
		if (hdr->agno >= mp->m_sb.sb_agcount)
			return -EINVAL;

		if (breq->startino == 0)
			breq->startino = XFS_AGINO_TO_INO(mp, hdr->agno, 0);
		else if (XFS_INO_TO_AGNO(mp, breq->startino) < hdr->agno)
			return -EINVAL;

		breq->flags |= XFS_IBULK_SAME_AG;

		/* Asking for an inode past the end of the AG?  We're done! */
		if (XFS_INO_TO_AGNO(mp, breq->startino) > hdr->agno)
			return XFS_ITER_ABORT;
	} else if (hdr->agno)
		return -EINVAL;

	/* Asking for an inode past the end of the FS?  We're done! */
	if (XFS_INO_TO_AGNO(mp, breq->startino) >= mp->m_sb.sb_agcount)
		return XFS_ITER_ABORT;

	return 0;
}

/*
 * Update the userspace bulk request @hdr to reflect the end state of the
 * internal bulk request @breq.
 */
static void
xfs_bulk_ireq_teardown(
	struct xfs_bulk_ireq	*hdr,
	struct xfs_ibulk	*breq)
{
	hdr->ino = breq->startino;
	hdr->ocount = breq->ocount;
}

/* Handle the v5 bulkstat ioctl. */
STATIC int
xfs_ioc_bulkstat(
	struct xfs_mount		*mp,
	unsigned int			cmd,
	struct xfs_bulkstat_req __user	*arg)
{
	struct xfs_bulk_ireq	hdr;
	struct xfs_ibulk	breq = {
		.mp		= mp,
	};
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
		return -EFAULT;

	error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->bulkstat);
	if (error == XFS_ITER_ABORT)
		goto out_teardown;
	if (error < 0)
		return error;

	error = xfs_bulkstat(&breq, xfs_bulkstat_fmt);
	if (error)
		return error;

out_teardown:
	xfs_bulk_ireq_teardown(&hdr, &breq);
	if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return 0;
}
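
/*
 * Example (userspace sketch, not part of this file): driving the v5
 * interface above.  The control header and the record buffer travel in one
 * struct xfs_bulkstat_req allocation, and hdr.ino is rewritten on return, so
 * the same request can simply be reissued to continue the walk.  A rough
 * sketch, assuming the v5 structures from xfs_fs.h and a hypothetical fsfd:
 *
 *	unsigned int		nr = 64;
 *	struct xfs_bulkstat_req	*req;
 *
 *	req = calloc(1, sizeof(*req) + nr * sizeof(struct xfs_bulkstat));
 *	req->hdr.icount = nr;
 *	req->hdr.flags = XFS_BULK_IREQ_AGNO;	// confine the walk to one AG
 *	req->hdr.agno = 0;
 *
 *	while (ioctl(fsfd, XFS_IOC_BULKSTAT, req) == 0 && req->hdr.ocount > 0)
 *		;	// consume req->hdr.ocount records from req->bulkstat
 */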

STATIC int
xfs_inumbers_fmt(
	struct xfs_ibulk		*breq,
	const struct xfs_inumbers	*igrp)
{
	if (copy_to_user(breq->ubuffer, igrp, sizeof(struct xfs_inumbers)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_inumbers));
}

/* Handle the v5 inumbers ioctl. */
STATIC int
xfs_ioc_inumbers(
	struct xfs_mount		*mp,
	unsigned int			cmd,
	struct xfs_inumbers_req __user	*arg)
{
	struct xfs_bulk_ireq	hdr;
	struct xfs_ibulk	breq = {
		.mp		= mp,
	};
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
		return -EFAULT;

	error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->inumbers);
	if (error == XFS_ITER_ABORT)
		goto out_teardown;
	if (error < 0)
		return error;

	error = xfs_inumbers(&breq, xfs_inumbers_fmt);
	if (error)
		return error;

out_teardown:
	xfs_bulk_ireq_teardown(&hdr, &breq);
	if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return 0;
}

STATIC int
xfs_ioc_fsgeometry(
	struct xfs_mount	*mp,
	void			__user *arg,
	int			struct_version)
{
	struct xfs_fsop_geom	fsgeo;
	size_t			len;

	xfs_fs_geometry(&mp->m_sb, &fsgeo, struct_version);

	if (struct_version <= 3)
		len = sizeof(struct xfs_fsop_geom_v1);
	else if (struct_version == 4)
		len = sizeof(struct xfs_fsop_geom_v4);
	else {
		xfs_fsop_geom_health(mp, &fsgeo);
		len = sizeof(fsgeo);
	}

	if (copy_to_user(arg, &fsgeo, len))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_ioc_ag_geometry(
	struct xfs_mount	*mp,
	void			__user *arg)
{
	struct xfs_ag_geometry	ageo;
	int			error;

	if (copy_from_user(&ageo, arg, sizeof(ageo)))
		return -EFAULT;

	error = xfs_ag_get_geometry(mp, ageo.ag_number, &ageo);
	if (error)
		return error;

	if (copy_to_user(arg, &ageo, sizeof(ageo)))
		return -EFAULT;
	return 0;
}

/*
 * Linux extended inode flags interface.
 */

STATIC unsigned int
xfs_merge_ioc_xflags(
	unsigned int	flags,
	unsigned int	start)
{
	unsigned int	xflags = start;

	if (flags & FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	else
		xflags &= ~FS_XFLAG_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	else
		xflags &= ~FS_XFLAG_APPEND;
	if (flags & FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	else
		xflags &= ~FS_XFLAG_SYNC;
	if (flags & FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	else
		xflags &= ~FS_XFLAG_NOATIME;
	if (flags & FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	else
		xflags &= ~FS_XFLAG_NODUMP;

	return xflags;
}

STATIC unsigned int
xfs_di2lxflags(
	uint16_t	di_flags)
{
	unsigned int	flags = 0;

	if (di_flags & XFS_DIFLAG_IMMUTABLE)
		flags |= FS_IMMUTABLE_FL;
	if (di_flags & XFS_DIFLAG_APPEND)
		flags |= FS_APPEND_FL;
	if (di_flags & XFS_DIFLAG_SYNC)
		flags |= FS_SYNC_FL;
	if (di_flags & XFS_DIFLAG_NOATIME)
		flags |= FS_NOATIME_FL;
	if (di_flags & XFS_DIFLAG_NODUMP)
		flags |= FS_NODUMP_FL;
	return flags;
}

static void
xfs_fill_fsxattr(
	struct xfs_inode	*ip,
	bool			attr,
	struct fsxattr		*fa)
{
	simple_fill_fsxattr(fa, xfs_ip2xflags(ip));
	fa->fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
	fa->fsx_cowextsize = ip->i_d.di_cowextsize <<
			ip->i_mount->m_sb.sb_blocklog;
	fa->fsx_projid = xfs_get_projid(ip);

	if (attr) {
		if (ip->i_afp) {
			if (ip->i_afp->if_flags & XFS_IFEXTENTS)
				fa->fsx_nextents = xfs_iext_count(ip->i_afp);
			else
				fa->fsx_nextents = ip->i_d.di_anextents;
		} else
			fa->fsx_nextents = 0;
	} else {
		if (ip->i_df.if_flags & XFS_IFEXTENTS)
			fa->fsx_nextents = xfs_iext_count(&ip->i_df);
		else
			fa->fsx_nextents = ip->i_d.di_nextents;
	}
}

STATIC int
xfs_ioc_fsgetxattr(
	xfs_inode_t		*ip,
	int			attr,
	void			__user *arg)
{
	struct fsxattr		fa;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	xfs_fill_fsxattr(ip, attr, &fa);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (copy_to_user(arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}
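
/*
 * Example (userspace sketch, not part of this file): xfs_ioc_fsgetxattr()
 * above serves the generic FS_IOC_FSGETXATTR ioctl from <linux/fs.h>, so the
 * XFS extended flags can be read without any XFS-specific headers:
 *
 *	struct fsxattr	fa;
 *
 *	if (ioctl(fd, FS_IOC_FSGETXATTR, &fa) == 0)
 *		printf("xflags 0x%x extsize %u projid %u\n",
 *		       fa.fsx_xflags, fa.fsx_extsize, fa.fsx_projid);
 */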
1149
Christoph Hellwigdd606872017-09-02 08:21:20 -07001150STATIC uint16_t
1151xfs_flags2diflags(
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001152 struct xfs_inode *ip,
1153 unsigned int xflags)
1154{
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001155 /* can't set PREALLOC this way, just preserve it */
Christoph Hellwigdd606872017-09-02 08:21:20 -07001156 uint16_t di_flags =
1157 (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);
1158
Dave Chinnere7b89482016-01-04 16:44:15 +11001159 if (xflags & FS_XFLAG_IMMUTABLE)
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001160 di_flags |= XFS_DIFLAG_IMMUTABLE;
Dave Chinnere7b89482016-01-04 16:44:15 +11001161 if (xflags & FS_XFLAG_APPEND)
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001162 di_flags |= XFS_DIFLAG_APPEND;
Dave Chinnere7b89482016-01-04 16:44:15 +11001163 if (xflags & FS_XFLAG_SYNC)
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001164 di_flags |= XFS_DIFLAG_SYNC;
Dave Chinnere7b89482016-01-04 16:44:15 +11001165 if (xflags & FS_XFLAG_NOATIME)
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001166 di_flags |= XFS_DIFLAG_NOATIME;
Dave Chinnere7b89482016-01-04 16:44:15 +11001167 if (xflags & FS_XFLAG_NODUMP)
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001168 di_flags |= XFS_DIFLAG_NODUMP;
Dave Chinnere7b89482016-01-04 16:44:15 +11001169 if (xflags & FS_XFLAG_NODEFRAG)
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001170 di_flags |= XFS_DIFLAG_NODEFRAG;
Dave Chinnere7b89482016-01-04 16:44:15 +11001171 if (xflags & FS_XFLAG_FILESTREAM)
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001172 di_flags |= XFS_DIFLAG_FILESTREAM;
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001173 if (S_ISDIR(VFS_I(ip)->i_mode)) {
Dave Chinnere7b89482016-01-04 16:44:15 +11001174 if (xflags & FS_XFLAG_RTINHERIT)
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001175 di_flags |= XFS_DIFLAG_RTINHERIT;
Dave Chinnere7b89482016-01-04 16:44:15 +11001176 if (xflags & FS_XFLAG_NOSYMLINKS)
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001177 di_flags |= XFS_DIFLAG_NOSYMLINKS;
Dave Chinnere7b89482016-01-04 16:44:15 +11001178 if (xflags & FS_XFLAG_EXTSZINHERIT)
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001179 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
Dave Chinnere7b89482016-01-04 16:44:15 +11001180 if (xflags & FS_XFLAG_PROJINHERIT)
Dave Chinner9336e3a2014-10-02 09:18:40 +10001181 di_flags |= XFS_DIFLAG_PROJINHERIT;
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001182 } else if (S_ISREG(VFS_I(ip)->i_mode)) {
Dave Chinnere7b89482016-01-04 16:44:15 +11001183 if (xflags & FS_XFLAG_REALTIME)
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001184 di_flags |= XFS_DIFLAG_REALTIME;
Dave Chinnere7b89482016-01-04 16:44:15 +11001185 if (xflags & FS_XFLAG_EXTSIZE)
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001186 di_flags |= XFS_DIFLAG_EXTSIZE;
1187 }
Dave Chinner58f88ca2016-01-04 16:44:15 +11001188
Christoph Hellwigdd606872017-09-02 08:21:20 -07001189 return di_flags;
1190}
Dave Chinner58f88ca2016-01-04 16:44:15 +11001191
Christoph Hellwigdd606872017-09-02 08:21:20 -07001192STATIC uint64_t
1193xfs_flags2diflags2(
1194 struct xfs_inode *ip,
1195 unsigned int xflags)
1196{
1197 uint64_t di_flags2 =
1198 (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK);
1199
Dave Chinner58f88ca2016-01-04 16:44:15 +11001200 if (xflags & FS_XFLAG_DAX)
1201 di_flags2 |= XFS_DIFLAG2_DAX;
Darrick J. Wongf7ca3522016-10-03 09:11:43 -07001202 if (xflags & FS_XFLAG_COWEXTSIZE)
1203 di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
Dave Chinner58f88ca2016-01-04 16:44:15 +11001204
Christoph Hellwigdd606872017-09-02 08:21:20 -07001205 return di_flags2;
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001206}
1207
Christoph Hellwigf13fae22008-07-21 16:16:15 +10001208STATIC void
1209xfs_diflags_to_linux(
1210 struct xfs_inode *ip)
1211{
David Chinnere4f75292008-08-13 16:00:45 +10001212 struct inode *inode = VFS_I(ip);
Christoph Hellwigf13fae22008-07-21 16:16:15 +10001213 unsigned int xflags = xfs_ip2xflags(ip);
1214
Dave Chinnere7b89482016-01-04 16:44:15 +11001215 if (xflags & FS_XFLAG_IMMUTABLE)
Christoph Hellwigf13fae22008-07-21 16:16:15 +10001216 inode->i_flags |= S_IMMUTABLE;
1217 else
1218 inode->i_flags &= ~S_IMMUTABLE;
Dave Chinnere7b89482016-01-04 16:44:15 +11001219 if (xflags & FS_XFLAG_APPEND)
Christoph Hellwigf13fae22008-07-21 16:16:15 +10001220 inode->i_flags |= S_APPEND;
1221 else
1222 inode->i_flags &= ~S_APPEND;
Dave Chinnere7b89482016-01-04 16:44:15 +11001223 if (xflags & FS_XFLAG_SYNC)
Christoph Hellwigf13fae22008-07-21 16:16:15 +10001224 inode->i_flags |= S_SYNC;
1225 else
1226 inode->i_flags &= ~S_SYNC;
Dave Chinnere7b89482016-01-04 16:44:15 +11001227 if (xflags & FS_XFLAG_NOATIME)
Christoph Hellwigf13fae22008-07-21 16:16:15 +10001228 inode->i_flags |= S_NOATIME;
1229 else
1230 inode->i_flags &= ~S_NOATIME;
Christoph Hellwig742d8422017-08-30 09:23:01 -07001231#if 0 /* disabled until the flag switching races are sorted out */
Dave Chinner58f88ca2016-01-04 16:44:15 +11001232 if (xflags & FS_XFLAG_DAX)
1233 inode->i_flags |= S_DAX;
1234 else
1235 inode->i_flags &= ~S_DAX;
Christoph Hellwig742d8422017-08-30 09:23:01 -07001236#endif
Christoph Hellwigf13fae22008-07-21 16:16:15 +10001237}
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001238
Dave Chinner29a17c02015-02-02 10:14:25 +11001239static int
1240xfs_ioctl_setattr_xflags(
1241 struct xfs_trans *tp,
1242 struct xfs_inode *ip,
1243 struct fsxattr *fa)
1244{
1245 struct xfs_mount *mp = ip->i_mount;
Christoph Hellwigdd606872017-09-02 08:21:20 -07001246 uint64_t di_flags2;
Dave Chinner29a17c02015-02-02 10:14:25 +11001247
1248 /* Can't change realtime flag if any extents are allocated. */
1249 if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
Dave Chinnere7b89482016-01-04 16:44:15 +11001250 XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & FS_XFLAG_REALTIME))
Dave Chinner29a17c02015-02-02 10:14:25 +11001251 return -EINVAL;
1252
1253 /* If realtime flag is set then must have realtime device */
Dave Chinnere7b89482016-01-04 16:44:15 +11001254 if (fa->fsx_xflags & FS_XFLAG_REALTIME) {
Dave Chinner29a17c02015-02-02 10:14:25 +11001255 if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
1256 (ip->i_d.di_extsize % mp->m_sb.sb_rextsize))
1257 return -EINVAL;
1258 }
1259
Darrick J. Wong1987fd72016-10-10 16:49:29 +11001260 /* Clear reflink if we are actually able to set the rt flag. */
Darrick J. Wongc8e156a2016-10-03 09:11:50 -07001261 if ((fa->fsx_xflags & FS_XFLAG_REALTIME) && xfs_is_reflink_inode(ip))
Darrick J. Wong1987fd72016-10-10 16:49:29 +11001262 ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
Darrick J. Wongc8e156a2016-10-03 09:11:50 -07001263
Darrick J. Wong4f435eb2016-10-03 09:11:50 -07001264 /* Don't allow us to set DAX mode for a reflinked file for now. */
1265 if ((fa->fsx_xflags & FS_XFLAG_DAX) && xfs_is_reflink_inode(ip))
1266 return -EINVAL;
1267
Christoph Hellwigdd606872017-09-02 08:21:20 -07001268 /* diflags2 only valid for v3 inodes. */
1269 di_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
1270 if (di_flags2 && ip->i_d.di_version < 3)
1271 return -EINVAL;
1272
1273 ip->i_d.di_flags = xfs_flags2diflags(ip, fa->fsx_xflags);
1274 ip->i_d.di_flags2 = di_flags2;
1275
Dave Chinner29a17c02015-02-02 10:14:25 +11001276 xfs_diflags_to_linux(ip);
1277 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1278 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
Bill O'Donnellff6d6af2015-10-12 18:21:22 +11001279 XFS_STATS_INC(mp, xs_ig_attrchg);
Dave Chinner29a17c02015-02-02 10:14:25 +11001280 return 0;
1281}
1282
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001283/*
Dave Chinner3a6a8542016-03-01 09:41:33 +11001284 * If we are changing DAX flags, we have to ensure the file is clean and any
1285 * cached objects in the address space are invalidated and removed. This
1286 	 * requires us to lock out other IO and page faults similarly to a truncate
1287 * operation. The locks need to be held until the transaction has been committed
1288 * so that the cache invalidation is atomic with respect to the DAX flag
1289 * manipulation.
1290 */
1291static int
1292xfs_ioctl_setattr_dax_invalidate(
1293 struct xfs_inode *ip,
1294 struct fsxattr *fa,
1295 int *join_flags)
1296{
1297 struct inode *inode = VFS_I(ip);
Ross Zwisler6851a3d2017-09-18 14:46:03 -07001298 struct super_block *sb = inode->i_sb;
Dave Chinner3a6a8542016-03-01 09:41:33 +11001299 int error;
1300
1301 *join_flags = 0;
1302
1303 /*
1304 * It is only valid to set the DAX flag on regular files and
Dave Chinner64485432016-03-01 09:41:33 +11001305 * directories on filesystems where the block size is equal to the page
Darrick J. Wongaaacdd22018-05-31 15:07:47 -07001306 * size. On directories it serves as an inherited hint so we don't
1307 * have to check the device for dax support or flush pagecache.
Dave Chinner3a6a8542016-03-01 09:41:33 +11001308 */
Dave Chinner64485432016-03-01 09:41:33 +11001309 if (fa->fsx_xflags & FS_XFLAG_DAX) {
1310 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
1311 return -EINVAL;
Darrick J. Wongaaacdd22018-05-31 15:07:47 -07001312 if (S_ISREG(inode->i_mode) &&
1313 !bdev_dax_supported(xfs_find_bdev_for_inode(VFS_I(ip)),
Dave Jiang80660f22018-05-30 13:03:46 -07001314 sb->s_blocksize))
Dave Chinner64485432016-03-01 09:41:33 +11001315 return -EINVAL;
1316 }
Dave Chinner3a6a8542016-03-01 09:41:33 +11001317
1318 /* If the DAX state is not changing, we have nothing to do here. */
1319 if ((fa->fsx_xflags & FS_XFLAG_DAX) && IS_DAX(inode))
1320 return 0;
1321 if (!(fa->fsx_xflags & FS_XFLAG_DAX) && !IS_DAX(inode))
1322 return 0;
1323
Darrick J. Wongaaacdd22018-05-31 15:07:47 -07001324 if (S_ISDIR(inode->i_mode))
1325 return 0;
1326
Dave Chinner3a6a8542016-03-01 09:41:33 +11001327 /* lock, flush and invalidate mapping in preparation for flag change */
1328 xfs_ilock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
1329 error = filemap_write_and_wait(inode->i_mapping);
1330 if (error)
1331 goto out_unlock;
1332 error = invalidate_inode_pages2(inode->i_mapping);
1333 if (error)
1334 goto out_unlock;
1335
1336 *join_flags = XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL;
1337 return 0;
1338
1339out_unlock:
1340 xfs_iunlock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
1341 return error;
1342
1343}
1344
1345/*
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001346 * Set up the transaction structure for the setattr operation, checking that we
1347 * have permission to do so. On success, return a clean transaction and the
1348 * inode locked exclusively ready for further operation specific checks. On
1349 * failure, return an error without modifying or locking the inode.
Dave Chinner3a6a8542016-03-01 09:41:33 +11001350 *
1351 	 * The inode might already be IO locked on call. If this is the case, the locks
1352 	 * are indicated in @join_flags and we take full responsibility for ensuring they
1353 * are unlocked from now on. Hence if we have an error here, we still have to
1354 * unlock them. Otherwise, once they are joined to the transaction, they will
1355 * be unlocked on commit/cancel.
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001356 */
1357static struct xfs_trans *
1358xfs_ioctl_setattr_get_trans(
Dave Chinner3a6a8542016-03-01 09:41:33 +11001359 struct xfs_inode *ip,
1360 int join_flags)
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001361{
1362 struct xfs_mount *mp = ip->i_mount;
1363 struct xfs_trans *tp;
Dave Chinner3a6a8542016-03-01 09:41:33 +11001364 int error = -EROFS;
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001365
1366 if (mp->m_flags & XFS_MOUNT_RDONLY)
Dave Chinner3a6a8542016-03-01 09:41:33 +11001367 goto out_unlock;
1368 error = -EIO;
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001369 if (XFS_FORCED_SHUTDOWN(mp))
Dave Chinner3a6a8542016-03-01 09:41:33 +11001370 goto out_unlock;
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001371
Christoph Hellwig253f4912016-04-06 09:19:55 +10001372 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001373 if (error)
Darrick J. Wong3de5eab2019-04-22 16:28:34 -07001374 goto out_unlock;
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001375
1376 xfs_ilock(ip, XFS_ILOCK_EXCL);
Dave Chinner3a6a8542016-03-01 09:41:33 +11001377 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | join_flags);
1378 join_flags = 0;
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001379
1380 /*
1381 * CAP_FOWNER overrides the following restrictions:
1382 *
1383 * The user ID of the calling process must be equal to the file owner
1384 * ID, except in cases where the CAP_FSETID capability is applicable.
1385 */
1386 if (!inode_owner_or_capable(VFS_I(ip))) {
1387 error = -EPERM;
1388 goto out_cancel;
1389 }
1390
1391 if (mp->m_flags & XFS_MOUNT_WSYNC)
1392 xfs_trans_set_sync(tp);
1393
1394 return tp;
1395
1396out_cancel:
Christoph Hellwig4906e212015-06-04 13:47:56 +10001397 xfs_trans_cancel(tp);
Dave Chinner3a6a8542016-03-01 09:41:33 +11001398out_unlock:
1399 if (join_flags)
1400 xfs_iunlock(ip, join_flags);
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001401 return ERR_PTR(error);
1402}
1403
Iustin Pop9b94fcc2015-02-02 10:26:26 +11001404/*
1405 * extent size hint validation is somewhat cumbersome. Rules are:
1406 *
1407 * 1. extent size hint is only valid for directories and regular files
Dave Chinnere7b89482016-01-04 16:44:15 +11001408 * 2. FS_XFLAG_EXTSIZE is only valid for regular files
1409 * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
Iustin Pop9b94fcc2015-02-02 10:26:26 +11001410 * 4. can only be changed on regular files if no extents are allocated
1411 * 5. can be changed on directories at any time
1412 * 6. extsize hint of 0 turns off hints, clears inode flags.
1413 * 7. Extent size must be a multiple of the appropriate block size.
1414 * 8. for non-realtime files, the extent size hint must be limited
1415 * to half the AG size to avoid alignment extending the extent beyond the
1416 * limits of the AG.
Darrick J. Wong80e4e122017-10-17 21:37:42 -07001417 *
1418 * Please keep this function in sync with xfs_scrub_inode_extsize.
Iustin Pop9b94fcc2015-02-02 10:26:26 +11001419 */
kbuild test robotf92090e2015-02-05 11:13:21 +11001420static int
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001421xfs_ioctl_setattr_check_extsize(
1422 struct xfs_inode *ip,
1423 struct fsxattr *fa)
1424{
1425 struct xfs_mount *mp = ip->i_mount;
Darrick J. Wongca29be72019-07-01 08:25:36 -07001426 xfs_extlen_t size;
1427 xfs_fsblock_t extsize_fsb;
Iustin Pop9b94fcc2015-02-02 10:26:26 +11001428
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001429 if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_d.di_nextents &&
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001430 ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
1431 return -EINVAL;
1432
Darrick J. Wongca29be72019-07-01 08:25:36 -07001433 if (fa->fsx_extsize == 0)
1434 return 0;
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001435
Darrick J. Wongca29be72019-07-01 08:25:36 -07001436 extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
1437 if (extsize_fsb > MAXEXTLEN)
1438 return -EINVAL;
1439
1440 if (XFS_IS_REALTIME_INODE(ip) ||
1441 (fa->fsx_xflags & FS_XFLAG_REALTIME)) {
1442 size = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
1443 } else {
1444 size = mp->m_sb.sb_blocksize;
1445 if (extsize_fsb > mp->m_sb.sb_agblocks / 2)
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001446 return -EINVAL;
Darrick J. Wongca29be72019-07-01 08:25:36 -07001447 }
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001448
Darrick J. Wongca29be72019-07-01 08:25:36 -07001449 if (fa->fsx_extsize % size)
1450 return -EINVAL;
Iustin Pop9b94fcc2015-02-02 10:26:26 +11001451
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001452 return 0;
1453}
1454
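/*
 * For illustration, the checks above are normally exercised from userspace
 * through the FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR round trip sketched below.
 * This is only a sketch: the path and sizes are made up, error handling is
 * minimal, and it assumes the struct fsxattr and FS_XFLAG_* definitions from
 * <linux/fs.h>.  A directory would set FS_XFLAG_EXTSZINHERIT instead of
 * FS_XFLAG_EXTSIZE (rules 2 and 3 above).
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <fcntl.h>
 *	#include <err.h>
 *
 *	int main(void)
 *	{
 *		struct fsxattr	fa;
 *		int		fd;
 *
 *		fd = open("/mnt/scratch/file", O_RDWR | O_CREAT, 0644);
 *		if (fd < 0)
 *			err(1, "open");
 *		if (ioctl(fd, FS_IOC_FSGETXATTR, &fa) < 0)
 *			err(1, "FS_IOC_FSGETXATTR");
 *		fa.fsx_extsize = 16 * 1024 * 1024;	// bytes, multiple of the block size (rule 7)
 *		fa.fsx_xflags |= FS_XFLAG_EXTSIZE;	// regular file hint (rule 2)
 *		if (ioctl(fd, FS_IOC_FSSETXATTR, &fa) < 0)
 *			err(1, "FS_IOC_FSSETXATTR");	// -EINVAL if a rule above is violated
 *		return 0;
 *	}
 */
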
Darrick J. Wongf7ca3522016-10-03 09:11:43 -07001455/*
1456 * CoW extent size hint validation rules are:
1457 *
1458 * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
1459 * The inode does not have to have any shared blocks, but it must be a v3.
1460 * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
1461 * for a directory, the hint is propagated to new files.
1462 * 3. Can be changed on files & directories at any time.
1463 * 4. CoW extsize hint of 0 turns off hints, clears inode flags.
1464 * 5. Extent size must be a multiple of the appropriate block size.
1465 * 6. The extent size hint must be limited to half the AG size to avoid
1466 * alignment extending the extent beyond the limits of the AG.
Darrick J. Wong80e4e122017-10-17 21:37:42 -07001467 *
1468 * Please keep this function in sync with xfs_scrub_inode_cowextsize.
Darrick J. Wongf7ca3522016-10-03 09:11:43 -07001469 */
1470static int
1471xfs_ioctl_setattr_check_cowextsize(
1472 struct xfs_inode *ip,
1473 struct fsxattr *fa)
1474{
1475 struct xfs_mount *mp = ip->i_mount;
Darrick J. Wongca29be72019-07-01 08:25:36 -07001476 xfs_extlen_t size;
1477 xfs_fsblock_t cowextsize_fsb;
Darrick J. Wongf7ca3522016-10-03 09:11:43 -07001478
1479 if (!(fa->fsx_xflags & FS_XFLAG_COWEXTSIZE))
1480 return 0;
1481
1482 if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb) ||
1483 ip->i_d.di_version != 3)
1484 return -EINVAL;
1485
Darrick J. Wongca29be72019-07-01 08:25:36 -07001486 if (fa->fsx_cowextsize == 0)
1487 return 0;
1488
1489 cowextsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
1490 if (cowextsize_fsb > MAXEXTLEN)
Darrick J. Wongf7ca3522016-10-03 09:11:43 -07001491 return -EINVAL;
1492
Darrick J. Wongca29be72019-07-01 08:25:36 -07001493 size = mp->m_sb.sb_blocksize;
1494 if (cowextsize_fsb > mp->m_sb.sb_agblocks / 2)
1495 return -EINVAL;
Darrick J. Wongf7ca3522016-10-03 09:11:43 -07001496
Darrick J. Wongca29be72019-07-01 08:25:36 -07001497 if (fa->fsx_cowextsize % size)
1498 return -EINVAL;
Darrick J. Wongf7ca3522016-10-03 09:11:43 -07001499
1500 return 0;
1501}
1502
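/*
 * The CoW hint follows the same FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR round
 * trip as the extent size sketch further up; only the field and flag differ.
 * Illustrative only, assuming the <linux/fs.h> definitions; the value is an
 * arbitrary example and must be a multiple of the block size (rule 5).
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	// Set a 32MiB copy-on-write extent size hint on an already-open fd.
 *	static int set_cowextsize(int fd)
 *	{
 *		struct fsxattr fa;
 *
 *		if (ioctl(fd, FS_IOC_FSGETXATTR, &fa) < 0)
 *			return -1;
 *		fa.fsx_cowextsize = 32 * 1024 * 1024;	// bytes
 *		fa.fsx_xflags |= FS_XFLAG_COWEXTSIZE;
 *		return ioctl(fd, FS_IOC_FSSETXATTR, &fa);	// -EINVAL without reflink (rule 1)
 *	}
 */
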
kbuild test robotf92090e2015-02-05 11:13:21 +11001503static int
Dave Chinner23bd0732015-02-02 10:22:53 +11001504xfs_ioctl_setattr_check_projid(
1505 struct xfs_inode *ip,
1506 struct fsxattr *fa)
1507{
1508 /* Disallow 32bit project ids if projid32bit feature is not enabled. */
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001509 if (fa->fsx_projid > (uint16_t)-1 &&
Dave Chinner23bd0732015-02-02 10:22:53 +11001510 !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
1511 return -EINVAL;
Dave Chinner23bd0732015-02-02 10:22:53 +11001512 return 0;
1513}
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001514
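/*
 * Project IDs are assigned through the same fsxattr interface, which is how
 * xfs_quota(8) sets up project quota trees.  A rough sketch, assuming the
 * <linux/fs.h> definitions; the id 42 is arbitrary, and ids above 65535 need
 * the projid32bit feature checked above.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	// Tag an open directory with project id 42 and let children inherit it.
 *	static int set_project(int dirfd)
 *	{
 *		struct fsxattr fa;
 *
 *		if (ioctl(dirfd, FS_IOC_FSGETXATTR, &fa) < 0)
 *			return -1;
 *		fa.fsx_projid = 42;
 *		fa.fsx_xflags |= FS_XFLAG_PROJINHERIT;
 *		return ioctl(dirfd, FS_IOC_FSSETXATTR, &fa);
 *	}
 */
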
1515STATIC int
1516xfs_ioctl_setattr(
1517 xfs_inode_t *ip,
Dave Chinnerfd179b92015-02-02 10:16:25 +11001518 struct fsxattr *fa)
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001519{
Darrick J. Wong7b0e4922019-07-01 08:25:35 -07001520 struct fsxattr old_fa;
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001521 struct xfs_mount *mp = ip->i_mount;
1522 struct xfs_trans *tp;
Christoph Hellwig7d095252009-06-08 15:33:32 +02001523 struct xfs_dquot *udqp = NULL;
Chandra Seetharaman92f8ff72013-07-11 00:00:40 -05001524 struct xfs_dquot *pdqp = NULL;
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001525 struct xfs_dquot *olddquot = NULL;
1526 int code;
Dave Chinner3a6a8542016-03-01 09:41:33 +11001527 int join_flags = 0;
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001528
Christoph Hellwigcca28fb2010-06-24 11:57:09 +10001529 trace_xfs_ioctl_setattr(ip);
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001530
Dave Chinner23bd0732015-02-02 10:22:53 +11001531 code = xfs_ioctl_setattr_check_projid(ip, fa);
1532 if (code)
1533 return code;
Arkadiusz Miśkiewicz23963e542010-08-26 10:19:43 +00001534
1535 /*
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001536 	 * If disk quotas are on, we make sure that the dquots do exist on disk
1537 	 * before we start any other transactions. Trying to do this later
1538 	 * is messy. We don't care to take a readlock to look at the IDs
1539 	 * in the inode here, because we can't hold it across the trans_reserve.
1540 * If the IDs do change before we take the ilock, we're covered
1541 * because the i_*dquot fields will get updated anyway.
1542 */
Dave Chinnerfd179b92015-02-02 10:16:25 +11001543 if (XFS_IS_QUOTA_ON(mp)) {
Christoph Hellwig7d095252009-06-08 15:33:32 +02001544 code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001545 ip->i_d.di_gid, fa->fsx_projid,
Chandra Seetharaman92f8ff72013-07-11 00:00:40 -05001546 XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001547 if (code)
1548 return code;
1549 }
1550
Dave Chinner3a6a8542016-03-01 09:41:33 +11001551 /*
1552 * Changing DAX config may require inode locking for mapping
1553 * invalidation. These need to be held all the way to transaction commit
1554 * or cancel time, so need to be passed through to
1555 * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
1556 * appropriately.
1557 */
1558 code = xfs_ioctl_setattr_dax_invalidate(ip, fa, &join_flags);
1559 if (code)
1560 goto error_free_dquots;
1561
1562 tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001563 if (IS_ERR(tp)) {
1564 code = PTR_ERR(tp);
1565 goto error_free_dquots;
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001566 }
1567
Dave Chinnerfd179b92015-02-02 10:16:25 +11001568 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) &&
1569 xfs_get_projid(ip) != fa->fsx_projid) {
1570 code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL, pdqp,
1571 capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0);
1572 if (code) /* out of quota */
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001573 goto error_trans_cancel;
Dave Chinnerfd179b92015-02-02 10:16:25 +11001574 }
1575
Darrick J. Wong7b0e4922019-07-01 08:25:35 -07001576 xfs_fill_fsxattr(ip, false, &old_fa);
1577 code = vfs_ioc_fssetxattr_check(VFS_I(ip), &old_fa, fa);
1578 if (code)
1579 goto error_trans_cancel;
1580
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001581 code = xfs_ioctl_setattr_check_extsize(ip, fa);
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001582 if (code)
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001583 goto error_trans_cancel;
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001584
Darrick J. Wongf7ca3522016-10-03 09:11:43 -07001585 code = xfs_ioctl_setattr_check_cowextsize(ip, fa);
1586 if (code)
1587 goto error_trans_cancel;
1588
Dave Chinner29a17c02015-02-02 10:14:25 +11001589 code = xfs_ioctl_setattr_xflags(tp, ip, fa);
1590 if (code)
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001591 goto error_trans_cancel;
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001592
1593 /*
Dave Chinnerfd179b92015-02-02 10:16:25 +11001594 * Change file ownership. Must be the owner or privileged. CAP_FSETID
1595 * overrides the following restrictions:
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001596 *
Dave Chinnerfd179b92015-02-02 10:16:25 +11001597 * The set-user-ID and set-group-ID bits of a file will be cleared upon
1598 * successful return from chown()
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001599 */
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001600
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001601 if ((VFS_I(ip)->i_mode & (S_ISUID|S_ISGID)) &&
Dave Chinnerfd179b92015-02-02 10:16:25 +11001602 !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001603 VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);
Dave Chinnerfd179b92015-02-02 10:16:25 +11001604
1605 /* Change the ownerships and register project quota modifications */
1606 if (xfs_get_projid(ip) != fa->fsx_projid) {
1607 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
1608 olddquot = xfs_qm_vop_chown(tp, ip,
1609 &ip->i_pdquot, pdqp);
Dwight Engenfd5e2aa2013-08-15 14:08:00 -04001610 }
Dave Chinnerfd179b92015-02-02 10:16:25 +11001611 ASSERT(ip->i_d.di_version > 1);
1612 xfs_set_projid(ip, fa->fsx_projid);
Christoph Hellwigf13fae22008-07-21 16:16:15 +10001613 }
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001614
Dave Chinnera8727032014-10-02 09:20:30 +10001615 /*
1616 * Only set the extent size hint if we've already determined that the
1617 * extent size hint should be set on the inode. If no extent size flags
1618 * are set on the inode then unconditionally clear the extent size hint.
1619 */
Dave Chinnerfd179b92015-02-02 10:16:25 +11001620 if (ip->i_d.di_flags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
1621 ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
1622 else
1623 ip->i_d.di_extsize = 0;
Darrick J. Wongf7ca3522016-10-03 09:11:43 -07001624 if (ip->i_d.di_version == 3 &&
1625 (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
1626 ip->i_d.di_cowextsize = fa->fsx_cowextsize >>
1627 mp->m_sb.sb_blocklog;
1628 else
1629 ip->i_d.di_cowextsize = 0;
Dave Chinnera8727032014-10-02 09:20:30 +10001630
Christoph Hellwig70393312015-06-04 13:48:08 +10001631 code = xfs_trans_commit(tp);
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001632
1633 /*
1634 * Release any dquot(s) the inode had kept before chown.
1635 */
Christoph Hellwig7d095252009-06-08 15:33:32 +02001636 xfs_qm_dqrele(olddquot);
1637 xfs_qm_dqrele(udqp);
Chandra Seetharaman92f8ff72013-07-11 00:00:40 -05001638 xfs_qm_dqrele(pdqp);
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001639
Christoph Hellwig288699f2010-06-23 18:11:15 +10001640 return code;
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001641
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001642error_trans_cancel:
Christoph Hellwig4906e212015-06-04 13:47:56 +10001643 xfs_trans_cancel(tp);
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001644error_free_dquots:
Christoph Hellwig7d095252009-06-08 15:33:32 +02001645 xfs_qm_dqrele(udqp);
Chandra Seetharaman92f8ff72013-07-11 00:00:40 -05001646 xfs_qm_dqrele(pdqp);
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001647 return code;
1648}
1649
Christoph Hellwigc83bfab2007-10-11 17:47:00 +10001650STATIC int
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001651xfs_ioc_fssetxattr(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 xfs_inode_t *ip,
1653 struct file *filp,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654 void __user *arg)
1655{
1656 struct fsxattr fa;
Jan Karad9457dc2012-06-12 16:20:39 +02001657 int error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001658
1659 if (copy_from_user(&fa, arg, sizeof(fa)))
1660 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661
Jan Karad9457dc2012-06-12 16:20:39 +02001662 error = mnt_want_write_file(filp);
1663 if (error)
1664 return error;
Dave Chinnerfd179b92015-02-02 10:16:25 +11001665 error = xfs_ioctl_setattr(ip, &fa);
Jan Karad9457dc2012-06-12 16:20:39 +02001666 mnt_drop_write_file(filp);
Dave Chinner24513372014-06-25 14:58:08 +10001667 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001668}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001670STATIC int
1671xfs_ioc_getxflags(
1672 xfs_inode_t *ip,
1673 void __user *arg)
1674{
1675 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001677 flags = xfs_di2lxflags(ip->i_d.di_flags);
1678 if (copy_to_user(arg, &flags, sizeof(flags)))
1679 return -EFAULT;
1680 return 0;
1681}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001683STATIC int
1684xfs_ioc_setxflags(
Dave Chinnerf96291f2015-02-02 10:15:56 +11001685 struct xfs_inode *ip,
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001686 struct file *filp,
1687 void __user *arg)
1688{
Dave Chinnerf96291f2015-02-02 10:15:56 +11001689 struct xfs_trans *tp;
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001690 struct fsxattr fa;
Darrick J. Wong7b0e4922019-07-01 08:25:35 -07001691 struct fsxattr old_fa;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001692 unsigned int flags;
Dave Chinner3a6a8542016-03-01 09:41:33 +11001693 int join_flags = 0;
Dave Chinnerf96291f2015-02-02 10:15:56 +11001694 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001696 if (copy_from_user(&flags, arg, sizeof(flags)))
1697 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001699 if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
1700 FS_NOATIME_FL | FS_NODUMP_FL | \
1701 FS_SYNC_FL))
1702 return -EOPNOTSUPP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001704 fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705
Jan Karad9457dc2012-06-12 16:20:39 +02001706 error = mnt_want_write_file(filp);
1707 if (error)
1708 return error;
Dave Chinnerf96291f2015-02-02 10:15:56 +11001709
Dave Chinner3a6a8542016-03-01 09:41:33 +11001710 /*
1711 * Changing DAX config may require inode locking for mapping
1712 	 * invalidation. Those locks need to be held all the way to transaction commit
1713 	 * or cancel time, so they need to be passed through to
1714 * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
1715 * appropriately.
1716 */
1717 error = xfs_ioctl_setattr_dax_invalidate(ip, &fa, &join_flags);
1718 if (error)
1719 goto out_drop_write;
1720
1721 tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
Dave Chinnerf96291f2015-02-02 10:15:56 +11001722 if (IS_ERR(tp)) {
1723 error = PTR_ERR(tp);
1724 goto out_drop_write;
1725 }
1726
Darrick J. Wong7b0e4922019-07-01 08:25:35 -07001727 xfs_fill_fsxattr(ip, false, &old_fa);
1728 error = vfs_ioc_fssetxattr_check(VFS_I(ip), &old_fa, &fa);
1729 if (error) {
1730 xfs_trans_cancel(tp);
1731 goto out_drop_write;
1732 }
1733
Dave Chinnerf96291f2015-02-02 10:15:56 +11001734 error = xfs_ioctl_setattr_xflags(tp, ip, &fa);
1735 if (error) {
Christoph Hellwig4906e212015-06-04 13:47:56 +10001736 xfs_trans_cancel(tp);
Dave Chinnerf96291f2015-02-02 10:15:56 +11001737 goto out_drop_write;
1738 }
1739
Christoph Hellwig70393312015-06-04 13:48:08 +10001740 error = xfs_trans_commit(tp);
Dave Chinnerf96291f2015-02-02 10:15:56 +11001741out_drop_write:
Jan Karad9457dc2012-06-12 16:20:39 +02001742 mnt_drop_write_file(filp);
Dave Chinner24513372014-06-25 14:58:08 +10001743 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744}
1745
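/*
 * XFS_IOC_GETXFLAGS/XFS_IOC_SETXFLAGS are aliases for the generic
 * FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls used by lsattr(1)/chattr(1), and
 * only the five FS_*_FL bits listed above are accepted.  A minimal sketch,
 * assuming the <linux/fs.h> definitions; note that the handlers copy a
 * 32 bit flags word.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	// chattr +a equivalent: mark an open fd append-only.
 *	static int make_append_only(int fd)
 *	{
 *		unsigned int flags;
 *
 *		if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
 *			return -1;
 *		flags |= FS_APPEND_FL;
 *		return ioctl(fd, FS_IOC_SETFLAGS, &flags);	// other bits -> -EOPNOTSUPP
 *	}
 */
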
Christoph Hellwig232b51942017-10-17 14:16:19 -07001746static bool
1747xfs_getbmap_format(
1748 struct kgetbmap *p,
1749 struct getbmapx __user *u,
1750 size_t recsize)
Eric Sandeen8a7141a2008-11-28 14:23:35 +11001751{
Christoph Hellwig232b51942017-10-17 14:16:19 -07001752 if (put_user(p->bmv_offset, &u->bmv_offset) ||
1753 put_user(p->bmv_block, &u->bmv_block) ||
1754 put_user(p->bmv_length, &u->bmv_length) ||
1755 put_user(0, &u->bmv_count) ||
1756 put_user(0, &u->bmv_entries))
1757 return false;
1758 if (recsize < sizeof(struct getbmapx))
1759 return true;
1760 if (put_user(0, &u->bmv_iflags) ||
1761 put_user(p->bmv_oflags, &u->bmv_oflags) ||
1762 put_user(0, &u->bmv_unused1) ||
1763 put_user(0, &u->bmv_unused2))
1764 return false;
1765 return true;
Eric Sandeen8a7141a2008-11-28 14:23:35 +11001766}
1767
1768STATIC int
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769xfs_ioc_getbmap(
Christoph Hellwig8f3e2052016-07-20 11:29:35 +10001770 struct file *file,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 unsigned int cmd,
1772 void __user *arg)
1773{
Darrick J. Wongbe6324c2017-04-03 15:17:57 -07001774 struct getbmapx bmx = { 0 };
Christoph Hellwig232b51942017-10-17 14:16:19 -07001775 struct kgetbmap *buf;
1776 size_t recsize;
1777 int error, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778
Christoph Hellwig232b51942017-10-17 14:16:19 -07001779 switch (cmd) {
1780 case XFS_IOC_GETBMAPA:
1781 bmx.bmv_iflags = BMV_IF_ATTRFORK;
1782 /*FALLTHRU*/
1783 case XFS_IOC_GETBMAP:
1784 if (file->f_mode & FMODE_NOCMTIME)
1785 bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ;
1786 /* struct getbmap is a strict subset of struct getbmapx. */
1787 recsize = sizeof(struct getbmap);
1788 break;
1789 case XFS_IOC_GETBMAPX:
1790 recsize = sizeof(struct getbmapx);
1791 break;
1792 default:
1793 return -EINVAL;
1794 }
1795
1796 if (copy_from_user(&bmx, arg, recsize))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10001797 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798
Eric Sandeen8a7141a2008-11-28 14:23:35 +11001799 if (bmx.bmv_count < 2)
Eric Sandeenb474c7a2014-06-22 15:04:54 +10001800 return -EINVAL;
Christoph Hellwig232b51942017-10-17 14:16:19 -07001801 if (bmx.bmv_count > ULONG_MAX / recsize)
1802 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803
Christoph Hellwig232b51942017-10-17 14:16:19 -07001804 buf = kmem_zalloc_large(bmx.bmv_count * sizeof(*buf), 0);
1805 if (!buf)
1806 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807
Christoph Hellwig232b51942017-10-17 14:16:19 -07001808 error = xfs_getbmap(XFS_I(file_inode(file)), &bmx, buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809 if (error)
Christoph Hellwig232b51942017-10-17 14:16:19 -07001810 goto out_free_buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811
Christoph Hellwig232b51942017-10-17 14:16:19 -07001812 error = -EFAULT;
1813 if (copy_to_user(arg, &bmx, recsize))
1814 goto out_free_buf;
1815 arg += recsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816
Christoph Hellwig232b51942017-10-17 14:16:19 -07001817 for (i = 0; i < bmx.bmv_entries; i++) {
1818 if (!xfs_getbmap_format(buf + i, arg, recsize))
1819 goto out_free_buf;
1820 arg += recsize;
1821 }
Eric Sandeen8a7141a2008-11-28 14:23:35 +11001822
Christoph Hellwig232b51942017-10-17 14:16:19 -07001823 error = 0;
1824out_free_buf:
1825 kmem_free(buf);
Christophe JAILLET132bf672018-11-06 07:50:50 -08001826 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827}
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001828
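/*
 * Tools like xfs_bmap(8) drive this interface roughly as sketched below:
 * element 0 of the getbmapx array is the control/header record and the
 * mapped extents come back in elements 1..bmv_entries, all in units of
 * 512-byte basic blocks.  Illustrative only; it assumes the xfsprogs
 * development headers (<xfs/xfs.h>) for struct getbmapx and
 * XFS_IOC_GETBMAPX.
 *
 *	#include <xfs/xfs.h>
 *	#include <sys/ioctl.h>
 *	#include <string.h>
 *	#include <stdio.h>
 *
 *	// Print up to 32 extents of the open XFS file behind fd.
 *	static int dump_extents(int fd)
 *	{
 *		struct getbmapx	map[33];	// map[0] is the header record
 *		int		i;
 *
 *		memset(map, 0, sizeof(map));
 *		map[0].bmv_length = -1LL;	// map to end of file
 *		map[0].bmv_count = 33;		// array slots, including the header
 *		if (ioctl(fd, XFS_IOC_GETBMAPX, map) < 0)
 *			return -1;
 *		for (i = 1; i <= map[0].bmv_entries; i++)
 *			printf("offset %lld len %lld block %lld oflags 0x%x\n",
 *			       (long long)map[i].bmv_offset,
 *			       (long long)map[i].bmv_length,
 *			       (long long)map[i].bmv_block,	// -1 means a hole
 *			       map[i].bmv_oflags);
 *		return 0;
 *	}
 */
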
Darrick J. Wonge89c0412017-03-28 14:56:37 -07001829struct getfsmap_info {
1830 struct xfs_mount *mp;
Christoph Hellwig9d17e142017-04-21 11:24:41 -07001831 struct fsmap_head __user *data;
1832 unsigned int idx;
Darrick J. Wonge89c0412017-03-28 14:56:37 -07001833 __u32 last_flags;
1834};
1835
1836STATIC int
1837xfs_getfsmap_format(struct xfs_fsmap *xfm, void *priv)
1838{
1839 struct getfsmap_info *info = priv;
1840 struct fsmap fm;
1841
1842 trace_xfs_getfsmap_mapping(info->mp, xfm);
1843
1844 info->last_flags = xfm->fmr_flags;
1845 xfs_fsmap_from_internal(&fm, xfm);
Christoph Hellwig9d17e142017-04-21 11:24:41 -07001846 if (copy_to_user(&info->data->fmh_recs[info->idx++], &fm,
1847 sizeof(struct fsmap)))
Darrick J. Wonge89c0412017-03-28 14:56:37 -07001848 return -EFAULT;
1849
Darrick J. Wonge89c0412017-03-28 14:56:37 -07001850 return 0;
1851}
1852
1853STATIC int
1854xfs_ioc_getfsmap(
1855 struct xfs_inode *ip,
Christoph Hellwig9d17e142017-04-21 11:24:41 -07001856 struct fsmap_head __user *arg)
Darrick J. Wonge89c0412017-03-28 14:56:37 -07001857{
Christoph Hellwigef2b67e2017-04-21 11:24:40 -07001858 struct getfsmap_info info = { NULL };
Darrick J. Wonge89c0412017-03-28 14:56:37 -07001859 struct xfs_fsmap_head xhead = {0};
1860 struct fsmap_head head;
1861 bool aborted = false;
1862 int error;
1863
1864 if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
1865 return -EFAULT;
1866 if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
1867 memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
1868 sizeof(head.fmh_keys[0].fmr_reserved)) ||
1869 memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
1870 sizeof(head.fmh_keys[1].fmr_reserved)))
1871 return -EINVAL;
1872
1873 xhead.fmh_iflags = head.fmh_iflags;
1874 xhead.fmh_count = head.fmh_count;
1875 xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
1876 xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);
1877
1878 trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
1879 trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);
1880
1881 info.mp = ip->i_mount;
Christoph Hellwig9d17e142017-04-21 11:24:41 -07001882 info.data = arg;
Darrick J. Wonge89c0412017-03-28 14:56:37 -07001883 error = xfs_getfsmap(ip->i_mount, &xhead, xfs_getfsmap_format, &info);
1884 if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
1885 error = 0;
1886 aborted = true;
1887 } else if (error)
1888 return error;
1889
1890 /* If we didn't abort, set the "last" flag in the last fmx */
Darrick J. Wong12e4a382017-04-23 10:45:21 -07001891 if (!aborted && info.idx) {
Darrick J. Wonge89c0412017-03-28 14:56:37 -07001892 info.last_flags |= FMR_OF_LAST;
Christoph Hellwig9d17e142017-04-21 11:24:41 -07001893 if (copy_to_user(&info.data->fmh_recs[info.idx - 1].fmr_flags,
1894 &info.last_flags, sizeof(info.last_flags)))
Darrick J. Wonge89c0412017-03-28 14:56:37 -07001895 return -EFAULT;
1896 }
1897
1898 /* copy back header */
1899 head.fmh_entries = xhead.fmh_entries;
1900 head.fmh_oflags = xhead.fmh_oflags;
1901 if (copy_to_user(arg, &head, sizeof(struct fsmap_head)))
1902 return -EFAULT;
1903
1904 return 0;
1905}
1906
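/*
 * Userspace (e.g. xfs_spaceman's fsmap command) usually runs this as a loop:
 * leave the low key at zero, set the high key to the end of the keyspace,
 * ask for a batch of records, and restart from the last record returned
 * until FMR_OF_LAST shows up.  The sketch below is illustrative and assumes
 * <linux/fsmap.h>, which provides fsmap_sizeof() and fsmap_advance(); the
 * batch size is arbitrary.
 *
 *	#include <linux/fsmap.h>
 *	#include <sys/ioctl.h>
 *	#include <stdlib.h>
 *	#include <stdio.h>
 *
 *	// Walk all reverse mappings of the filesystem containing fd.
 *	static int walk_fsmap(int fd)
 *	{
 *		unsigned int		nr = 128, i;
 *		struct fsmap_head	*head = calloc(1, fsmap_sizeof(nr));
 *		struct fsmap		*last;
 *
 *		if (!head)
 *			return -1;
 *		head->fmh_count = nr;
 *		head->fmh_keys[1].fmr_device = -1U;	// high key: end of keyspace;
 *		head->fmh_keys[1].fmr_physical = -1ULL;	// reserved fields stay zero
 *		head->fmh_keys[1].fmr_owner = -1ULL;
 *		head->fmh_keys[1].fmr_flags = -1U;
 *		head->fmh_keys[1].fmr_offset = -1ULL;
 *		do {
 *			if (ioctl(fd, FS_IOC_GETFSMAP, head) < 0)
 *				break;
 *			for (i = 0; i < head->fmh_entries; i++)
 *				printf("dev %u phys %llu len %llu owner %llu\n",
 *				       head->fmh_recs[i].fmr_device,
 *				       (unsigned long long)head->fmh_recs[i].fmr_physical,
 *				       (unsigned long long)head->fmh_recs[i].fmr_length,
 *				       (unsigned long long)head->fmh_recs[i].fmr_owner);
 *			if (!head->fmh_entries)
 *				break;
 *			last = &head->fmh_recs[head->fmh_entries - 1];
 *			fsmap_advance(head);	// next query starts after 'last'
 *		} while (!(last->fmr_flags & FMR_OF_LAST));
 *		free(head);
 *		return 0;
 *	}
 */
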
Darrick J. Wong36fd6e82017-10-17 21:37:34 -07001907STATIC int
1908xfs_ioc_scrub_metadata(
1909 struct xfs_inode *ip,
1910 void __user *arg)
1911{
1912 struct xfs_scrub_metadata scrub;
1913 int error;
1914
1915 if (!capable(CAP_SYS_ADMIN))
1916 return -EPERM;
1917
1918 if (copy_from_user(&scrub, arg, sizeof(scrub)))
1919 return -EFAULT;
1920
1921 error = xfs_scrub_metadata(ip, &scrub);
1922 if (error)
1923 return error;
1924
1925 if (copy_to_user(arg, &scrub, sizeof(scrub)))
1926 return -EFAULT;
1927
1928 return 0;
1929}
1930
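/*
 * This is the hook xfs_scrub(8) uses.  As a rough, illustrative sketch (it
 * assumes the struct xfs_scrub_metadata, XFS_SCRUB_TYPE_* and
 * XFS_SCRUB_OFLAG_* definitions from recent xfsprogs headers), a caller with
 * CAP_SYS_ADMIN can check a single piece of metadata like this:
 *
 *	#include <xfs/xfs.h>
 *	#include <sys/ioctl.h>
 *	#include <string.h>
 *
 *	// Scrub the inode record of the open file itself; <0 error, 1 corrupt, 0 clean.
 *	static int scrub_this_inode(int fd)
 *	{
 *		struct xfs_scrub_metadata sm;
 *
 *		memset(&sm, 0, sizeof(sm));	// sm_ino == 0: use fd's own inode
 *		sm.sm_type = XFS_SCRUB_TYPE_INODE;
 *		if (ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm) < 0)
 *			return -1;
 *		return (sm.sm_flags & XFS_SCRUB_OFLAG_CORRUPT) ? 1 : 0;
 *	}
 */
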
Dave Chinnera133d952013-08-12 20:49:48 +10001931int
1932xfs_ioc_swapext(
1933 xfs_swapext_t *sxp)
1934{
1935 xfs_inode_t *ip, *tip;
1936 struct fd f, tmp;
1937 int error = 0;
1938
1939 /* Pull information for the target fd */
1940 f = fdget((int)sxp->sx_fdtarget);
1941 if (!f.file) {
Dave Chinner24513372014-06-25 14:58:08 +10001942 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001943 goto out;
1944 }
1945
1946 if (!(f.file->f_mode & FMODE_WRITE) ||
1947 !(f.file->f_mode & FMODE_READ) ||
1948 (f.file->f_flags & O_APPEND)) {
Dave Chinner24513372014-06-25 14:58:08 +10001949 error = -EBADF;
Dave Chinnera133d952013-08-12 20:49:48 +10001950 goto out_put_file;
1951 }
1952
1953 tmp = fdget((int)sxp->sx_fdtmp);
1954 if (!tmp.file) {
Dave Chinner24513372014-06-25 14:58:08 +10001955 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001956 goto out_put_file;
1957 }
1958
1959 if (!(tmp.file->f_mode & FMODE_WRITE) ||
1960 !(tmp.file->f_mode & FMODE_READ) ||
1961 (tmp.file->f_flags & O_APPEND)) {
Dave Chinner24513372014-06-25 14:58:08 +10001962 error = -EBADF;
Dave Chinnera133d952013-08-12 20:49:48 +10001963 goto out_put_tmp_file;
1964 }
1965
1966 if (IS_SWAPFILE(file_inode(f.file)) ||
1967 IS_SWAPFILE(file_inode(tmp.file))) {
Dave Chinner24513372014-06-25 14:58:08 +10001968 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001969 goto out_put_tmp_file;
1970 }
1971
Jann Horn7f1b6242016-07-20 10:30:30 +10001972 /*
1973 * We need to ensure that the fds passed in point to XFS inodes
1974 * before we cast and access them as XFS structures as we have no
1975 * control over what the user passes us here.
1976 */
1977 if (f.file->f_op != &xfs_file_operations ||
1978 tmp.file->f_op != &xfs_file_operations) {
1979 error = -EINVAL;
1980 goto out_put_tmp_file;
1981 }
1982
Dave Chinnera133d952013-08-12 20:49:48 +10001983 ip = XFS_I(file_inode(f.file));
1984 tip = XFS_I(file_inode(tmp.file));
1985
1986 if (ip->i_mount != tip->i_mount) {
Dave Chinner24513372014-06-25 14:58:08 +10001987 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001988 goto out_put_tmp_file;
1989 }
1990
1991 if (ip->i_ino == tip->i_ino) {
Dave Chinner24513372014-06-25 14:58:08 +10001992 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001993 goto out_put_tmp_file;
1994 }
1995
1996 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
Dave Chinner24513372014-06-25 14:58:08 +10001997 error = -EIO;
Dave Chinnera133d952013-08-12 20:49:48 +10001998 goto out_put_tmp_file;
1999 }
2000
2001 error = xfs_swap_extents(ip, tip, sxp);
2002
2003 out_put_tmp_file:
2004 fdput(tmp);
2005 out_put_file:
2006 fdput(f);
2007 out:
2008 return error;
2009}
2010
Eric Sandeenf7664b32018-05-15 13:21:48 -07002011static int
2012xfs_ioc_getlabel(
2013 struct xfs_mount *mp,
2014 char __user *user_label)
2015{
2016 struct xfs_sb *sbp = &mp->m_sb;
2017 char label[XFSLABEL_MAX + 1];
2018
2019 /* Paranoia */
2020 BUILD_BUG_ON(sizeof(sbp->sb_fname) > FSLABEL_MAX);
2021
Arnd Bergmann4bb8b652018-06-05 19:42:45 -07002022 /* 1 larger than sb_fname, so this ensures a trailing NUL char */
2023 memset(label, 0, sizeof(label));
Eric Sandeenf7664b32018-05-15 13:21:48 -07002024 spin_lock(&mp->m_sb_lock);
Arnd Bergmann4bb8b652018-06-05 19:42:45 -07002025 strncpy(label, sbp->sb_fname, XFSLABEL_MAX);
Eric Sandeenf7664b32018-05-15 13:21:48 -07002026 spin_unlock(&mp->m_sb_lock);
2027
Arnd Bergmann4bb8b652018-06-05 19:42:45 -07002028 if (copy_to_user(user_label, label, sizeof(label)))
Eric Sandeenf7664b32018-05-15 13:21:48 -07002029 return -EFAULT;
2030 return 0;
2031}
2032
2033static int
2034xfs_ioc_setlabel(
2035 struct file *filp,
2036 struct xfs_mount *mp,
2037 char __user *newlabel)
2038{
2039 struct xfs_sb *sbp = &mp->m_sb;
2040 char label[XFSLABEL_MAX + 1];
2041 size_t len;
2042 int error;
2043
2044 if (!capable(CAP_SYS_ADMIN))
2045 return -EPERM;
2046 /*
2047 	 * The generic ioctl allows up to FSLABEL_MAX chars, but the XFS label is much
2048 * smaller, at 12 bytes. We copy one more to be sure we find the
2049 * (required) NULL character to test the incoming label length.
2050 * NB: The on disk label doesn't need to be null terminated.
2051 */
2052 if (copy_from_user(label, newlabel, XFSLABEL_MAX + 1))
2053 return -EFAULT;
2054 len = strnlen(label, XFSLABEL_MAX + 1);
2055 if (len > sizeof(sbp->sb_fname))
2056 return -EINVAL;
2057
2058 error = mnt_want_write_file(filp);
2059 if (error)
2060 return error;
2061
2062 spin_lock(&mp->m_sb_lock);
2063 memset(sbp->sb_fname, 0, sizeof(sbp->sb_fname));
Arnd Bergmann4bb8b652018-06-05 19:42:45 -07002064 memcpy(sbp->sb_fname, label, len);
Eric Sandeenf7664b32018-05-15 13:21:48 -07002065 spin_unlock(&mp->m_sb_lock);
2066
2067 /*
2068 * Now we do several things to satisfy userspace.
2069 * In addition to normal logging of the primary superblock, we also
2070 * immediately write these changes to sector zero for the primary, then
2071 * update all backup supers (as xfs_db does for a label change), then
2072 * invalidate the block device page cache. This is so that any prior
2073 * buffered reads from userspace (i.e. from blkid) are invalidated,
2074 * and userspace will see the newly-written label.
2075 */
2076 error = xfs_sync_sb_buf(mp);
2077 if (error)
2078 goto out;
2079 /*
2080 * growfs also updates backup supers so lock against that.
2081 */
2082 mutex_lock(&mp->m_growlock);
2083 error = xfs_update_secondary_sbs(mp);
2084 mutex_unlock(&mp->m_growlock);
2085
2086 invalidate_bdev(mp->m_ddev_targp->bt_bdev);
2087
2088out:
2089 mnt_drop_write_file(filp);
2090 return error;
2091}
2092
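/*
 * These back the generic FS_IOC_GETFSLABEL/FS_IOC_SETFSLABEL interface, so a
 * label tool does not need anything XFS specific.  A small sketch, assuming
 * the FSLABEL_MAX definition from <linux/fs.h>; any fd on the filesystem
 * works, and SETFSLABEL needs CAP_SYS_ADMIN.  XFS rejects labels longer than
 * 12 characters, per the length check above.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <stdio.h>
 *
 *	// Print the current label, then change it.
 *	static int relabel(int fd, const char *newlabel)
 *	{
 *		char label[FSLABEL_MAX];
 *
 *		if (ioctl(fd, FS_IOC_GETFSLABEL, label) < 0)
 *			return -1;
 *		printf("old label: %s\n", label);	// NUL-terminated by the kernel
 *		snprintf(label, sizeof(label), "%s", newlabel);
 *		return ioctl(fd, FS_IOC_SETFSLABEL, label);
 *	}
 */
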
Christoph Hellwig4d4be482008-12-09 04:47:33 -05002093/*
2094 * Note: some of the ioctl's return positive numbers as a
2095 * byte count indicating success, such as readlink_by_handle.
2096 * So we don't "sign flip" like most other routines. This means
2097 * true errors need to be returned as a negative value.
2098 */
2099long
2100xfs_file_ioctl(
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002101 struct file *filp,
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002102 unsigned int cmd,
Christoph Hellwig4d4be482008-12-09 04:47:33 -05002103 unsigned long p)
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002104{
Al Viro496ad9a2013-01-23 17:07:38 -05002105 struct inode *inode = file_inode(filp);
Christoph Hellwig4d4be482008-12-09 04:47:33 -05002106 struct xfs_inode *ip = XFS_I(inode);
2107 struct xfs_mount *mp = ip->i_mount;
2108 void __user *arg = (void __user *)p;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002109 int error;
2110
Christoph Hellwigcca28fb2010-06-24 11:57:09 +10002111 trace_xfs_file_ioctl(ip);
Christoph Hellwig4d4be482008-12-09 04:47:33 -05002112
2113 switch (cmd) {
Christoph Hellwiga46db602011-01-07 13:02:04 +00002114 case FITRIM:
2115 return xfs_ioc_trim(mp, arg);
Eric Sandeenf7664b32018-05-15 13:21:48 -07002116 case FS_IOC_GETFSLABEL:
2117 return xfs_ioc_getlabel(mp, arg);
2118 case FS_IOC_SETFSLABEL:
2119 return xfs_ioc_setlabel(filp, mp, arg);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002120 case XFS_IOC_ALLOCSP:
2121 case XFS_IOC_FREESP:
2122 case XFS_IOC_RESVSP:
2123 case XFS_IOC_UNRESVSP:
2124 case XFS_IOC_ALLOCSP64:
2125 case XFS_IOC_FREESP64:
2126 case XFS_IOC_RESVSP64:
Dave Chinner44722352010-08-24 12:02:11 +10002127 case XFS_IOC_UNRESVSP64:
2128 case XFS_IOC_ZERO_RANGE: {
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002129 xfs_flock64_t bf;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002130
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002131 if (copy_from_user(&bf, arg, sizeof(bf)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002132 return -EFAULT;
Christoph Hellwig8f3e2052016-07-20 11:29:35 +10002133 return xfs_ioc_space(filp, cmd, &bf);
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002134 }
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002135 case XFS_IOC_DIOINFO: {
2136 struct dioattr da;
2137 xfs_buftarg_t *target =
2138 XFS_IS_REALTIME_INODE(ip) ?
2139 mp->m_rtdev_targp : mp->m_ddev_targp;
2140
Eric Sandeen7c71ee72014-01-21 16:46:23 -06002141 da.d_mem = da.d_miniosz = target->bt_logical_sectorsize;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002142 da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
2143
2144 if (copy_to_user(arg, &da, sizeof(da)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002145 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002146 return 0;
2147 }
2148
2149 case XFS_IOC_FSBULKSTAT_SINGLE:
2150 case XFS_IOC_FSBULKSTAT:
2151 case XFS_IOC_FSINUMBERS:
Darrick J. Wong8bfe9d12019-07-03 20:36:26 -07002152 return xfs_ioc_fsbulkstat(mp, cmd, arg);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002153
Darrick J. Wong0448b6f2019-07-03 20:36:27 -07002154 case XFS_IOC_BULKSTAT:
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002155 return xfs_ioc_bulkstat(mp, cmd, arg);
Darrick J. Wongfba97602019-07-03 20:36:28 -07002156 case XFS_IOC_INUMBERS:
2157 return xfs_ioc_inumbers(mp, cmd, arg);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002158
2159 case XFS_IOC_FSGEOMETRY_V1:
Dave Chinner1b6d9682019-04-12 07:41:16 -07002160 return xfs_ioc_fsgeometry(mp, arg, 3);
2161 case XFS_IOC_FSGEOMETRY_V4:
2162 return xfs_ioc_fsgeometry(mp, arg, 4);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002163 case XFS_IOC_FSGEOMETRY:
Dave Chinner1b6d9682019-04-12 07:41:16 -07002164 return xfs_ioc_fsgeometry(mp, arg, 5);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002165
Darrick J. Wong7cd50062019-04-12 07:41:17 -07002166 case XFS_IOC_AG_GEOMETRY:
2167 return xfs_ioc_ag_geometry(mp, arg);
2168
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002169 case XFS_IOC_GETVERSION:
2170 return put_user(inode->i_generation, (int __user *)arg);
2171
2172 case XFS_IOC_FSGETXATTR:
2173 return xfs_ioc_fsgetxattr(ip, 0, arg);
2174 case XFS_IOC_FSGETXATTRA:
2175 return xfs_ioc_fsgetxattr(ip, 1, arg);
Lachlan McIlroy3b2816b2008-04-18 12:43:35 +10002176 case XFS_IOC_FSSETXATTR:
Lachlan McIlroy65e67f52008-04-18 12:59:45 +10002177 return xfs_ioc_fssetxattr(ip, filp, arg);
2178 case XFS_IOC_GETXFLAGS:
2179 return xfs_ioc_getxflags(ip, arg);
2180 case XFS_IOC_SETXFLAGS:
2181 return xfs_ioc_setxflags(ip, filp, arg);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002182
2183 case XFS_IOC_FSSETDM: {
2184 struct fsdmidata dmi;
2185
2186 if (copy_from_user(&dmi, arg, sizeof(dmi)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002187 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002188
Jan Karad9457dc2012-06-12 16:20:39 +02002189 error = mnt_want_write_file(filp);
2190 if (error)
2191 return error;
2192
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002193 error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,
2194 dmi.fsd_dmstate);
Jan Karad9457dc2012-06-12 16:20:39 +02002195 mnt_drop_write_file(filp);
Dave Chinner24513372014-06-25 14:58:08 +10002196 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002197 }
2198
2199 case XFS_IOC_GETBMAP:
2200 case XFS_IOC_GETBMAPA:
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002201 case XFS_IOC_GETBMAPX:
Christoph Hellwig232b51942017-10-17 14:16:19 -07002202 return xfs_ioc_getbmap(filp, cmd, arg);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002203
Darrick J. Wonge89c0412017-03-28 14:56:37 -07002204 case FS_IOC_GETFSMAP:
2205 return xfs_ioc_getfsmap(ip, arg);
2206
Darrick J. Wong36fd6e82017-10-17 21:37:34 -07002207 case XFS_IOC_SCRUB_METADATA:
2208 return xfs_ioc_scrub_metadata(ip, arg);
2209
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002210 case XFS_IOC_FD_TO_HANDLE:
2211 case XFS_IOC_PATH_TO_HANDLE:
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002212 case XFS_IOC_PATH_TO_FSHANDLE: {
2213 xfs_fsop_handlereq_t hreq;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002214
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002215 if (copy_from_user(&hreq, arg, sizeof(hreq)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002216 return -EFAULT;
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002217 return xfs_find_handle(cmd, &hreq);
2218 }
2219 case XFS_IOC_OPEN_BY_HANDLE: {
2220 xfs_fsop_handlereq_t hreq;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002221
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002222 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002223 return -EFAULT;
Christoph Hellwigd296d302009-01-19 02:02:57 +01002224 return xfs_open_by_handle(filp, &hreq);
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002225 }
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002226 case XFS_IOC_FSSETDM_BY_HANDLE:
Christoph Hellwigd296d302009-01-19 02:02:57 +01002227 return xfs_fssetdm_by_handle(filp, arg);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002228
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002229 case XFS_IOC_READLINK_BY_HANDLE: {
2230 xfs_fsop_handlereq_t hreq;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002231
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002232 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002233 return -EFAULT;
Christoph Hellwigd296d302009-01-19 02:02:57 +01002234 return xfs_readlink_by_handle(filp, &hreq);
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002235 }
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002236 case XFS_IOC_ATTRLIST_BY_HANDLE:
Christoph Hellwigd296d302009-01-19 02:02:57 +01002237 return xfs_attrlist_by_handle(filp, arg);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002238
2239 case XFS_IOC_ATTRMULTI_BY_HANDLE:
Christoph Hellwigd296d302009-01-19 02:02:57 +01002240 return xfs_attrmulti_by_handle(filp, arg);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002241
2242 case XFS_IOC_SWAPEXT: {
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002243 struct xfs_swapext sxp;
2244
2245 if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002246 return -EFAULT;
Jan Karad9457dc2012-06-12 16:20:39 +02002247 error = mnt_want_write_file(filp);
2248 if (error)
2249 return error;
Dave Chinnera133d952013-08-12 20:49:48 +10002250 error = xfs_ioc_swapext(&sxp);
Jan Karad9457dc2012-06-12 16:20:39 +02002251 mnt_drop_write_file(filp);
Dave Chinner24513372014-06-25 14:58:08 +10002252 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002253 }
2254
2255 case XFS_IOC_FSCOUNTS: {
2256 xfs_fsop_counts_t out;
2257
Eric Sandeen91083262019-05-01 20:26:30 -07002258 xfs_fs_counts(mp, &out);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002259
2260 if (copy_to_user(arg, &out, sizeof(out)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002261 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002262 return 0;
2263 }
2264
2265 case XFS_IOC_SET_RESBLKS: {
2266 xfs_fsop_resblks_t inout;
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07002267 uint64_t in;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002268
2269 if (!capable(CAP_SYS_ADMIN))
2270 return -EPERM;
2271
Eric Sandeend5db0f92010-02-05 22:59:53 +00002272 if (mp->m_flags & XFS_MOUNT_RDONLY)
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002273 return -EROFS;
Eric Sandeend5db0f92010-02-05 22:59:53 +00002274
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002275 if (copy_from_user(&inout, arg, sizeof(inout)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002276 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002277
Jan Karad9457dc2012-06-12 16:20:39 +02002278 error = mnt_want_write_file(filp);
2279 if (error)
2280 return error;
2281
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002282 /* input parameter is passed in resblks field of structure */
2283 in = inout.resblks;
2284 error = xfs_reserve_blocks(mp, &in, &inout);
Jan Karad9457dc2012-06-12 16:20:39 +02002285 mnt_drop_write_file(filp);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002286 if (error)
Dave Chinner24513372014-06-25 14:58:08 +10002287 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002288
2289 if (copy_to_user(arg, &inout, sizeof(inout)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002290 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002291 return 0;
2292 }
2293
2294 case XFS_IOC_GET_RESBLKS: {
2295 xfs_fsop_resblks_t out;
2296
2297 if (!capable(CAP_SYS_ADMIN))
2298 return -EPERM;
2299
2300 error = xfs_reserve_blocks(mp, NULL, &out);
2301 if (error)
Dave Chinner24513372014-06-25 14:58:08 +10002302 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002303
2304 if (copy_to_user(arg, &out, sizeof(out)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002305 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002306
2307 return 0;
2308 }
2309
2310 case XFS_IOC_FSGROWFSDATA: {
2311 xfs_growfs_data_t in;
2312
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002313 if (copy_from_user(&in, arg, sizeof(in)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002314 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002315
Jan Karad9457dc2012-06-12 16:20:39 +02002316 error = mnt_want_write_file(filp);
2317 if (error)
2318 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002319 error = xfs_growfs_data(mp, &in);
Jan Karad9457dc2012-06-12 16:20:39 +02002320 mnt_drop_write_file(filp);
Dave Chinner24513372014-06-25 14:58:08 +10002321 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002322 }
2323
2324 case XFS_IOC_FSGROWFSLOG: {
2325 xfs_growfs_log_t in;
2326
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002327 if (copy_from_user(&in, arg, sizeof(in)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002328 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002329
Jan Karad9457dc2012-06-12 16:20:39 +02002330 error = mnt_want_write_file(filp);
2331 if (error)
2332 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002333 error = xfs_growfs_log(mp, &in);
Jan Karad9457dc2012-06-12 16:20:39 +02002334 mnt_drop_write_file(filp);
Dave Chinner24513372014-06-25 14:58:08 +10002335 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002336 }
2337
2338 case XFS_IOC_FSGROWFSRT: {
2339 xfs_growfs_rt_t in;
2340
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002341 if (copy_from_user(&in, arg, sizeof(in)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002342 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002343
Jan Karad9457dc2012-06-12 16:20:39 +02002344 error = mnt_want_write_file(filp);
2345 if (error)
2346 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002347 error = xfs_growfs_rt(mp, &in);
Jan Karad9457dc2012-06-12 16:20:39 +02002348 mnt_drop_write_file(filp);
Dave Chinner24513372014-06-25 14:58:08 +10002349 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002350 }
2351
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002352 case XFS_IOC_GOINGDOWN: {
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07002353 uint32_t in;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002354
2355 if (!capable(CAP_SYS_ADMIN))
2356 return -EPERM;
2357
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07002358 if (get_user(in, (uint32_t __user *)arg))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002359 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002360
Dave Chinner24513372014-06-25 14:58:08 +10002361 return xfs_fs_goingdown(mp, in);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002362 }
2363
2364 case XFS_IOC_ERROR_INJECTION: {
2365 xfs_error_injection_t in;
2366
2367 if (!capable(CAP_SYS_ADMIN))
2368 return -EPERM;
2369
2370 if (copy_from_user(&in, arg, sizeof(in)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002371 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002372
Darrick J. Wong31965ef2017-06-20 17:54:46 -07002373 return xfs_errortag_add(mp, in.errtag);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002374 }
2375
2376 case XFS_IOC_ERROR_CLEARALL:
2377 if (!capable(CAP_SYS_ADMIN))
2378 return -EPERM;
2379
Darrick J. Wong31965ef2017-06-20 17:54:46 -07002380 return xfs_errortag_clearall(mp);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002381
Brian Foster8ca149d2012-11-07 12:21:12 -05002382 case XFS_IOC_FREE_EOFBLOCKS: {
Dwight Engenb9fe5052013-08-15 14:08:02 -04002383 struct xfs_fs_eofblocks eofb;
2384 struct xfs_eofblocks keofb;
Brian Foster8ca149d2012-11-07 12:21:12 -05002385
Dwight Engen8c567a72013-08-15 14:08:03 -04002386 if (!capable(CAP_SYS_ADMIN))
2387 return -EPERM;
2388
2389 if (mp->m_flags & XFS_MOUNT_RDONLY)
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002390 return -EROFS;
Dwight Engen8c567a72013-08-15 14:08:03 -04002391
Brian Foster8ca149d2012-11-07 12:21:12 -05002392 if (copy_from_user(&eofb, arg, sizeof(eofb)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002393 return -EFAULT;
Brian Foster8ca149d2012-11-07 12:21:12 -05002394
Dwight Engenb9fe5052013-08-15 14:08:02 -04002395 error = xfs_fs_eofblocks_from_user(&eofb, &keofb);
2396 if (error)
Dave Chinner24513372014-06-25 14:58:08 +10002397 return error;
Brian Foster8ca149d2012-11-07 12:21:12 -05002398
Dave Chinner24513372014-06-25 14:58:08 +10002399 return xfs_icache_free_eofblocks(mp, &keofb);
Brian Foster8ca149d2012-11-07 12:21:12 -05002400 }
2401
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002402 default:
2403 return -ENOTTY;
2404 }
2405}