// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_rtalloc.h"
#include "xfs_iwalk.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_attr.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_fsops.h"
#include "xfs_discard.h"
#include "xfs_quota.h"
#include "xfs_export.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_acl.h"
#include "xfs_btree.h"
#include <linux/fsmap.h>
#include "xfs_fsmap.h"
#include "scrub/xfs_scrub.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_health.h"
#include "xfs_reflink.h"
#include "xfs_ioctl.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"

#include <linux/mount.h>
#include <linux/namei.h>

/*
 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
 * a file or fs handle.
 *
 * XFS_IOC_PATH_TO_FSHANDLE
 *    returns fs handle for a mount point or path within that mount point
 * XFS_IOC_FD_TO_HANDLE
 *    returns full handle for a FD opened in user space
 * XFS_IOC_PATH_TO_HANDLE
 *    returns full handle for a path
 */
int
xfs_find_handle(
	unsigned int		cmd,
	xfs_fsop_handlereq_t	*hreq)
{
	int			hsize;
	xfs_handle_t		handle;
	struct inode		*inode;
	struct fd		f = {NULL};
	struct path		path;
	int			error;
	struct xfs_inode	*ip;

	if (cmd == XFS_IOC_FD_TO_HANDLE) {
		f = fdget(hreq->fd);
		if (!f.file)
			return -EBADF;
		inode = file_inode(f.file);
	} else {
		error = user_path_at(AT_FDCWD, hreq->path, 0, &path);
		if (error)
			return error;
		inode = d_inode(path.dentry);
	}
	ip = XFS_I(inode);

	/*
	 * We can only generate handles for inodes residing on a XFS filesystem,
	 * and only for regular files, directories or symbolic links.
	 */
	error = -EINVAL;
	if (inode->i_sb->s_magic != XFS_SB_MAGIC)
		goto out_put;

	error = -EBADF;
	if (!S_ISREG(inode->i_mode) &&
	    !S_ISDIR(inode->i_mode) &&
	    !S_ISLNK(inode->i_mode))
		goto out_put;


	memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t));

	if (cmd == XFS_IOC_PATH_TO_FSHANDLE) {
		/*
		 * This handle only contains an fsid, zero the rest.
		 */
		memset(&handle.ha_fid, 0, sizeof(handle.ha_fid));
		hsize = sizeof(xfs_fsid_t);
	} else {
		handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
					sizeof(handle.ha_fid.fid_len);
		handle.ha_fid.fid_pad = 0;
		handle.ha_fid.fid_gen = inode->i_generation;
		handle.ha_fid.fid_ino = ip->i_ino;
		hsize = sizeof(xfs_handle_t);
	}

	error = -EFAULT;
	if (copy_to_user(hreq->ohandle, &handle, hsize) ||
	    copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
		goto out_put;

	error = 0;

 out_put:
	if (cmd == XFS_IOC_FD_TO_HANDLE)
		fdput(f);
	else
		path_put(&path);
	return error;
}
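
/*
 * Illustrative userspace sketch (not part of the kernel sources): obtaining
 * a filesystem handle and a full file handle via the ioctls documented
 * above.  The fd passed to ioctl(), the path, and the error handling are
 * all hypothetical; the field names follow struct xfs_fsop_handlereq.
 *
 *	xfs_handle_t handle;
 *	__u32 hlen = sizeof(handle);
 *	xfs_fsop_handlereq_t hreq = {
 *		.path		= "/mnt/xfs/some/file",
 *		.ohandle	= &handle,
 *		.ohandlen	= &hlen,
 *	};
 *
 *	ioctl(fd, XFS_IOC_PATH_TO_FSHANDLE, &hreq);	// fsid-only handle
 *	ioctl(fd, XFS_IOC_PATH_TO_HANDLE, &hreq);	// full handle
 */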

/*
 * No need to do permission checks on the various pathname components
 * as the handle operations are privileged.
 */
STATIC int
xfs_handle_acceptable(
	void			*context,
	struct dentry		*dentry)
{
	return 1;
}

/*
 * Convert userspace handle data into a dentry.
 */
struct dentry *
xfs_handle_to_dentry(
	struct file		*parfilp,
	void __user		*uhandle,
	u32			hlen)
{
	xfs_handle_t		handle;
	struct xfs_fid64	fid;

	/*
	 * Only allow handle opens under a directory.
	 */
	if (!S_ISDIR(file_inode(parfilp)->i_mode))
		return ERR_PTR(-ENOTDIR);

	if (hlen != sizeof(xfs_handle_t))
		return ERR_PTR(-EINVAL);
	if (copy_from_user(&handle, uhandle, hlen))
		return ERR_PTR(-EFAULT);
	if (handle.ha_fid.fid_len !=
	    sizeof(handle.ha_fid) - sizeof(handle.ha_fid.fid_len))
		return ERR_PTR(-EINVAL);

	memset(&fid, 0, sizeof(struct fid));
	fid.ino = handle.ha_fid.fid_ino;
	fid.gen = handle.ha_fid.fid_gen;

	return exportfs_decode_fh(parfilp->f_path.mnt, (struct fid *)&fid, 3,
			FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG,
			xfs_handle_acceptable, NULL);
}

STATIC struct dentry *
xfs_handlereq_to_dentry(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen);
}

int
xfs_open_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	const struct cred	*cred = current_cred();
	int			error;
	int			fd;
	int			permflag;
	struct file		*filp;
	struct inode		*inode;
	struct dentry		*dentry;
	fmode_t			fmode;
	struct path		path;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	inode = d_inode(dentry);

	/* Restrict xfs_open_by_handle to directories & regular files. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		error = -EPERM;
		goto out_dput;
	}

#if BITS_PER_LONG != 32
	hreq->oflags |= O_LARGEFILE;
#endif

	permflag = hreq->oflags;
	fmode = OPEN_FMODE(permflag);
	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
	    (fmode & FMODE_WRITE) && IS_APPEND(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	/* Can't write directories. */
	if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) {
		error = -EISDIR;
		goto out_dput;
	}

	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		error = fd;
		goto out_dput;
	}

	path.mnt = parfilp->f_path.mnt;
	path.dentry = dentry;
	filp = dentry_open(&path, hreq->oflags, cred);
	dput(dentry);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);
		return PTR_ERR(filp);
	}

	if (S_ISREG(inode->i_mode)) {
		filp->f_flags |= O_NOATIME;
		filp->f_mode |= FMODE_NOCMTIME;
	}

	fd_install(fd, filp);
	return fd;

 out_dput:
	dput(dentry);
	return error;
}
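
/*
 * Illustrative userspace sketch (not part of the kernel sources): reopening
 * a file from a previously obtained full handle.  XFS_IOC_OPEN_BY_HANDLE
 * must be issued by a CAP_SYS_ADMIN caller against a directory fd on the
 * same filesystem (the "parfilp" above); the handle variable is assumed to
 * have been filled in by XFS_IOC_PATH_TO_HANDLE.
 *
 *	xfs_fsop_handlereq_t hreq = {
 *		.ihandle	= &handle,
 *		.ihandlen	= sizeof(handle),
 *		.oflags		= O_RDONLY,
 *	};
 *	int newfd = ioctl(dirfd, XFS_IOC_OPEN_BY_HANDLE, &hreq);
 */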

int
xfs_readlink_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	struct dentry		*dentry;
	__u32			olen;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* Restrict this handle operation to symlinks only. */
	if (!d_is_symlink(dentry)) {
		error = -EINVAL;
		goto out_dput;
	}

	if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) {
		error = -EFAULT;
		goto out_dput;
	}

	error = vfs_readlink(dentry, hreq->ohandle, olen);

 out_dput:
	dput(dentry);
	return error;
}

STATIC int
xfs_attrlist_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error = -ENOMEM;
	attrlist_cursor_kern_t	*cursor;
	struct xfs_fsop_attrlist_handlereq __user *p = arg;
	xfs_fsop_attrlist_handlereq_t al_hreq;
	struct dentry		*dentry;
	char			*kbuf;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
		return -EFAULT;
	if (al_hreq.buflen < sizeof(struct attrlist) ||
	    al_hreq.buflen > XFS_XATTR_LIST_MAX)
		return -EINVAL;

	/*
	 * Reject flags, only allow namespaces.
	 */
	if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
		return -EINVAL;
	if (al_hreq.flags == (ATTR_ROOT | ATTR_SECURE))
		return -EINVAL;

	dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	kbuf = kmem_zalloc_large(al_hreq.buflen, 0);
	if (!kbuf)
		goto out_dput;

	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
	error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen,
					al_hreq.flags, cursor);
	if (error)
		goto out_kfree;

	if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
		error = -EFAULT;
		goto out_kfree;
	}

	if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
		error = -EFAULT;

out_kfree:
	kmem_free(kbuf);
out_dput:
	dput(dentry);
	return error;
}

static int
xfs_attrmulti_attr_get(
	struct inode		*inode,
	unsigned char		*name,
	unsigned char		__user *ubuf,
	uint32_t		*len,
	uint32_t		flags)
{
	struct xfs_da_args	args = {
		.dp		= XFS_I(inode),
		.flags		= flags,
		.name		= name,
		.namelen	= strlen(name),
		.valuelen	= *len,
	};
	int			error;

	if (*len > XFS_XATTR_SIZE_MAX)
		return -EINVAL;

	args.value = kmem_zalloc_large(*len, 0);
	if (!args.value)
		return -ENOMEM;

	error = xfs_attr_get(&args);
	if (error)
		goto out_kfree;

	*len = args.valuelen;
	if (copy_to_user(ubuf, args.value, args.valuelen))
		error = -EFAULT;

out_kfree:
	kmem_free(args.value);
	return error;
}

static int
xfs_attrmulti_attr_set(
	struct inode		*inode,
	unsigned char		*name,
	const unsigned char	__user *ubuf,
	uint32_t		len,
	uint32_t		flags)
{
	struct xfs_da_args	args = {
		.dp		= XFS_I(inode),
		.flags		= flags,
		.name		= name,
		.namelen	= strlen(name),
	};
	int			error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;

	if (ubuf) {
		if (len > XFS_XATTR_SIZE_MAX)
			return -EINVAL;
		args.value = memdup_user(ubuf, len);
		if (IS_ERR(args.value))
			return PTR_ERR(args.value);
		args.valuelen = len;
	}

	error = xfs_attr_set(&args);
	if (!error)
		xfs_forget_acl(inode, name, flags);
	kfree(args.value);
	return error;
}

int
xfs_ioc_attrmulti_one(
	struct file		*parfilp,
	struct inode		*inode,
	uint32_t		opcode,
	void __user		*uname,
	void __user		*value,
	uint32_t		*len,
	uint32_t		flags)
{
	unsigned char		*name;
	int			error;

	if ((flags & ATTR_ROOT) && (flags & ATTR_SECURE))
		return -EINVAL;
	flags &= ~ATTR_KERNEL_FLAGS;

	name = strndup_user(uname, MAXNAMELEN);
	if (IS_ERR(name))
		return PTR_ERR(name);

	switch (opcode) {
	case ATTR_OP_GET:
		error = xfs_attrmulti_attr_get(inode, name, value, len, flags);
		break;
	case ATTR_OP_REMOVE:
		value = NULL;
		*len = 0;
		/* fall through */
	case ATTR_OP_SET:
		error = mnt_want_write_file(parfilp);
		if (error)
			break;
		error = xfs_attrmulti_attr_set(inode, name, value, *len, flags);
		mnt_drop_write_file(parfilp);
		break;
	default:
		error = -EINVAL;
		break;
	}

	kfree(name);
	return error;
}

STATIC int
xfs_attrmulti_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	xfs_attr_multiop_t	*ops;
	xfs_fsop_attrmulti_handlereq_t am_hreq;
	struct dentry		*dentry;
	unsigned int		i, size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
		return -EFAULT;

	/* overflow check */
	if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t))
		return -E2BIG;

	dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = -E2BIG;
	size = am_hreq.opcount * sizeof(xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_dput;

	ops = memdup_user(am_hreq.ops, size);
	if (IS_ERR(ops)) {
		error = PTR_ERR(ops);
		goto out_dput;
	}

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = xfs_ioc_attrmulti_one(parfilp,
				d_inode(dentry), ops[i].am_opcode,
				ops[i].am_attrname, ops[i].am_attrvalue,
				&ops[i].am_length, ops[i].am_flags);
	}

	if (copy_to_user(am_hreq.ops, ops, size))
		error = -EFAULT;

	kfree(ops);
 out_dput:
	dput(dentry);
	return error;
}
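
/*
 * Illustrative userspace sketch (not part of the kernel sources) of the op
 * array consumed by xfs_attrmulti_by_handle().  Each element maps to one
 * call of xfs_ioc_attrmulti_one() above; the attribute name, buffer size
 * and handle request are hypothetical.
 *
 *	char value[256];
 *	struct xfs_attr_multiop ops[1] = {{
 *		.am_opcode	= ATTR_OP_GET,
 *		.am_attrname	= "user.comment",
 *		.am_attrvalue	= value,
 *		.am_length	= sizeof(value),
 *		.am_flags	= 0,
 *	}};
 *	xfs_fsop_attrmulti_handlereq_t am_hreq = {
 *		.hreq		= hreq,		// handle request as above
 *		.opcount	= 1,
 *		.ops		= ops,
 *	};
 *	ioctl(dirfd, XFS_IOC_ATTRMULTI_BY_HANDLE, &am_hreq);
 */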

int
xfs_ioc_space(
	struct file		*filp,
	xfs_flock64_t		*bf)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct iattr		iattr;
	enum xfs_prealloc_flags	flags = XFS_PREALLOC_CLEAR;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	int			error;

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
		return -EPERM;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (xfs_is_always_cow_inode(ip))
		return -EOPNOTSUPP;

	if (filp->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;
	if (filp->f_mode & FMODE_NOCMTIME)
		flags |= XFS_PREALLOC_INVISIBLE;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;
	inode_dio_wait(inode);

	switch (bf->l_whence) {
	case 0: /*SEEK_SET*/
		break;
	case 1: /*SEEK_CUR*/
		bf->l_start += filp->f_pos;
		break;
	case 2: /*SEEK_END*/
		bf->l_start += XFS_ISIZE(ip);
		break;
	default:
		error = -EINVAL;
		goto out_unlock;
	}

	if (bf->l_start < 0 || bf->l_start > inode->i_sb->s_maxbytes) {
		error = -EINVAL;
		goto out_unlock;
	}

	if (bf->l_start > XFS_ISIZE(ip)) {
		error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
				bf->l_start - XFS_ISIZE(ip), 0);
		if (error)
			goto out_unlock;
	}

	iattr.ia_valid = ATTR_SIZE;
	iattr.ia_size = bf->l_start;
	error = xfs_vn_setattr_size(file_dentry(filp), &iattr);
	if (error)
		goto out_unlock;

	error = xfs_update_prealloc_flags(ip, flags);

out_unlock:
	xfs_iunlock(ip, iolock);
	mnt_drop_write_file(filp);
	return error;
}
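
/*
 * Illustrative userspace sketch (not part of the kernel sources): the
 * legacy space ioctls that reach xfs_ioc_space() pass a struct xfs_flock64
 * whose l_whence/l_start pair selects the new size, as handled above.
 * The ioctl name and exact semantics below are an assumption based on the
 * historical XFS_IOC_ALLOCSP64 interface; new code should use fallocate().
 *
 *	xfs_flock64_t bf = {
 *		.l_whence	= 0,			// SEEK_SET
 *		.l_start	= 1024 * 1024 * 1024,	// grow file to 1 GiB
 *	};
 *	ioctl(fd, XFS_IOC_ALLOCSP64, &bf);
 */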

/* Return 0 on success or positive error */
int
xfs_fsbulkstat_one_fmt(
	struct xfs_ibulk	*breq,
	const struct xfs_bulkstat *bstat)
{
	struct xfs_bstat	bs1;

	xfs_bulkstat_to_bstat(breq->mp, &bs1, bstat);
	if (copy_to_user(breq->ubuffer, &bs1, sizeof(bs1)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_bstat));
}

int
xfs_fsinumbers_fmt(
	struct xfs_ibulk	*breq,
	const struct xfs_inumbers *igrp)
{
	struct xfs_inogrp	ig1;

	xfs_inumbers_to_inogrp(&ig1, igrp);
	if (copy_to_user(breq->ubuffer, &ig1, sizeof(struct xfs_inogrp)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_inogrp));
}

STATIC int
xfs_ioc_fsbulkstat(
	xfs_mount_t		*mp,
	unsigned int		cmd,
	void			__user *arg)
{
	struct xfs_fsop_bulkreq	bulkreq;
	struct xfs_ibulk	breq = {
		.mp		= mp,
		.ocount		= 0,
	};
	xfs_ino_t		lastino;
	int			error;

	/* done = 1 if there are more stats to get and if bulkstat */
	/* should be called again (unused here, but used in dmapi) */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (copy_from_user(&bulkreq, arg, sizeof(struct xfs_fsop_bulkreq)))
		return -EFAULT;

	if (copy_from_user(&lastino, bulkreq.lastip, sizeof(__s64)))
		return -EFAULT;

	if (bulkreq.icount <= 0)
		return -EINVAL;

	if (bulkreq.ubuffer == NULL)
		return -EINVAL;

	breq.ubuffer = bulkreq.ubuffer;
	breq.icount = bulkreq.icount;

	/*
	 * FSBULKSTAT_SINGLE expects that *lastip contains the inode number
	 * that we want to stat.  However, FSINUMBERS and FSBULKSTAT expect
	 * that *lastip contains either zero or the number of the last inode to
	 * be examined by the previous call and return results starting with
	 * the next inode after that.  The new bulk request back end functions
	 * take the inode to start with, so we have to compute the startino
	 * parameter from lastino to maintain correct function.  lastino == 0
	 * is a special case because it has traditionally meant "first inode
	 * in filesystem".
	 */
	if (cmd == XFS_IOC_FSINUMBERS) {
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_inumbers(&breq, xfs_fsinumbers_fmt);
		lastino = breq.startino - 1;
	} else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE) {
		breq.startino = lastino;
		breq.icount = 1;
		error = xfs_bulkstat_one(&breq, xfs_fsbulkstat_one_fmt);
	} else {	/* XFS_IOC_FSBULKSTAT */
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_bulkstat(&breq, xfs_fsbulkstat_one_fmt);
		lastino = breq.startino - 1;
	}

	if (error)
		return error;

	if (bulkreq.lastip != NULL &&
	    copy_to_user(bulkreq.lastip, &lastino, sizeof(xfs_ino_t)))
		return -EFAULT;

	if (bulkreq.ocount != NULL &&
	    copy_to_user(bulkreq.ocount, &breq.ocount, sizeof(__s32)))
		return -EFAULT;

	return 0;
}

/* Return 0 on success or positive error */
static int
xfs_bulkstat_fmt(
	struct xfs_ibulk		*breq,
	const struct xfs_bulkstat	*bstat)
{
	if (copy_to_user(breq->ubuffer, bstat, sizeof(struct xfs_bulkstat)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_bulkstat));
}

/*
 * Check the incoming bulk request @hdr from userspace and initialize the
 * internal @breq bulk request appropriately.  Returns 0 if the bulk request
 * should proceed; -ECANCELED if there's nothing to do; or the usual
 * negative error code.
 */
static int
xfs_bulk_ireq_setup(
	struct xfs_mount	*mp,
	struct xfs_bulk_ireq	*hdr,
	struct xfs_ibulk	*breq,
	void __user		*ubuffer)
{
	if (hdr->icount == 0 ||
	    (hdr->flags & ~XFS_BULK_IREQ_FLAGS_ALL) ||
	    memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
		return -EINVAL;

	breq->startino = hdr->ino;
	breq->ubuffer = ubuffer;
	breq->icount = hdr->icount;
	breq->ocount = 0;
	breq->flags = 0;

	/*
	 * The @ino parameter is a special value, so we must look it up here.
	 * We're not allowed to have IREQ_AGNO, and we only return one inode
	 * worth of data.
	 */
	if (hdr->flags & XFS_BULK_IREQ_SPECIAL) {
		if (hdr->flags & XFS_BULK_IREQ_AGNO)
			return -EINVAL;

		switch (hdr->ino) {
		case XFS_BULK_IREQ_SPECIAL_ROOT:
			hdr->ino = mp->m_sb.sb_rootino;
			break;
		default:
			return -EINVAL;
		}
		breq->icount = 1;
	}

	/*
	 * The IREQ_AGNO flag means that we only want results from a given AG.
	 * If @hdr->ino is zero, we start iterating in that AG.  If @hdr->ino is
	 * beyond the specified AG then we return no results.
	 */
	if (hdr->flags & XFS_BULK_IREQ_AGNO) {
		if (hdr->agno >= mp->m_sb.sb_agcount)
			return -EINVAL;

		if (breq->startino == 0)
			breq->startino = XFS_AGINO_TO_INO(mp, hdr->agno, 0);
		else if (XFS_INO_TO_AGNO(mp, breq->startino) < hdr->agno)
			return -EINVAL;

		breq->flags |= XFS_IBULK_SAME_AG;

		/* Asking for an inode past the end of the AG?  We're done! */
		if (XFS_INO_TO_AGNO(mp, breq->startino) > hdr->agno)
			return -ECANCELED;
	} else if (hdr->agno)
		return -EINVAL;

	/* Asking for an inode past the end of the FS?  We're done! */
	if (XFS_INO_TO_AGNO(mp, breq->startino) >= mp->m_sb.sb_agcount)
		return -ECANCELED;

	return 0;
}

/*
 * Update the userspace bulk request @hdr to reflect the end state of the
 * internal bulk request @breq.
 */
static void
xfs_bulk_ireq_teardown(
	struct xfs_bulk_ireq	*hdr,
	struct xfs_ibulk	*breq)
{
	hdr->ino = breq->startino;
	hdr->ocount = breq->ocount;
}

/* Handle the v5 bulkstat ioctl. */
STATIC int
xfs_ioc_bulkstat(
	struct xfs_mount		*mp,
	unsigned int			cmd,
	struct xfs_bulkstat_req __user	*arg)
{
	struct xfs_bulk_ireq	hdr;
	struct xfs_ibulk	breq = {
		.mp		= mp,
	};
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
		return -EFAULT;

	error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->bulkstat);
	if (error == -ECANCELED)
		goto out_teardown;
	if (error < 0)
		return error;

	error = xfs_bulkstat(&breq, xfs_bulkstat_fmt);
	if (error)
		return error;

out_teardown:
	xfs_bulk_ireq_teardown(&hdr, &breq);
	if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return 0;
}
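
/*
 * Illustrative userspace sketch (not part of the kernel sources): a single
 * v5 bulkstat request restricted to one AG, matching the
 * xfs_bulk_ireq_setup() rules above.  The caller is assumed to have
 * allocated a struct xfs_bulkstat_req with room for hdr.icount records
 * after the header; the variable names are hypothetical.
 *
 *	req->hdr.ino	= 0;			// start at first inode of the AG
 *	req->hdr.icount	= nrecords;
 *	req->hdr.flags	= XFS_BULK_IREQ_AGNO;
 *	req->hdr.agno	= 0;
 *	error = ioctl(fd, XFS_IOC_BULKSTAT, req);
 *	// on success, hdr.ocount records are valid and hdr.ino is where
 *	// the next call should resume
 */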

STATIC int
xfs_inumbers_fmt(
	struct xfs_ibulk	*breq,
	const struct xfs_inumbers *igrp)
{
	if (copy_to_user(breq->ubuffer, igrp, sizeof(struct xfs_inumbers)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_inumbers));
}

/* Handle the v5 inumbers ioctl. */
STATIC int
xfs_ioc_inumbers(
	struct xfs_mount		*mp,
	unsigned int			cmd,
	struct xfs_inumbers_req __user	*arg)
{
	struct xfs_bulk_ireq	hdr;
	struct xfs_ibulk	breq = {
		.mp		= mp,
	};
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
		return -EFAULT;

	error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->inumbers);
	if (error == -ECANCELED)
		goto out_teardown;
	if (error < 0)
		return error;

	error = xfs_inumbers(&breq, xfs_inumbers_fmt);
	if (error)
		return error;

out_teardown:
	xfs_bulk_ireq_teardown(&hdr, &breq);
	if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return 0;
}

STATIC int
xfs_ioc_fsgeometry(
	struct xfs_mount	*mp,
	void			__user *arg,
	int			struct_version)
{
	struct xfs_fsop_geom	fsgeo;
	size_t			len;

	xfs_fs_geometry(&mp->m_sb, &fsgeo, struct_version);

	if (struct_version <= 3)
		len = sizeof(struct xfs_fsop_geom_v1);
	else if (struct_version == 4)
		len = sizeof(struct xfs_fsop_geom_v4);
	else {
		xfs_fsop_geom_health(mp, &fsgeo);
		len = sizeof(fsgeo);
	}

	if (copy_to_user(arg, &fsgeo, len))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_ioc_ag_geometry(
	struct xfs_mount	*mp,
	void			__user *arg)
{
	struct xfs_ag_geometry	ageo;
	int			error;

	if (copy_from_user(&ageo, arg, sizeof(ageo)))
		return -EFAULT;
	if (ageo.ag_flags)
		return -EINVAL;
	if (memchr_inv(&ageo.ag_reserved, 0, sizeof(ageo.ag_reserved)))
		return -EINVAL;

	error = xfs_ag_get_geometry(mp, ageo.ag_number, &ageo);
	if (error)
		return error;

	if (copy_to_user(arg, &ageo, sizeof(ageo)))
		return -EFAULT;
	return 0;
}
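
/*
 * Illustrative userspace sketch (not part of the kernel sources): querying
 * one AG's geometry.  Only ag_number is filled in by the caller; the flags
 * and reserved fields must be zero, as checked above.  The output field
 * names used below are an assumption based on the v5 uapi header.
 *
 *	struct xfs_ag_geometry ageo = { .ag_number = 2 };
 *
 *	if (ioctl(fd, XFS_IOC_AG_GEOMETRY, &ageo) == 0)
 *		printf("AG %u: %u blocks, %u free\n",
 *		       ageo.ag_number, ageo.ag_length, ageo.ag_freeblks);
 */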

/*
 * Linux extended inode flags interface.
 */

STATIC unsigned int
xfs_merge_ioc_xflags(
	unsigned int	flags,
	unsigned int	start)
{
	unsigned int	xflags = start;

	if (flags & FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	else
		xflags &= ~FS_XFLAG_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	else
		xflags &= ~FS_XFLAG_APPEND;
	if (flags & FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	else
		xflags &= ~FS_XFLAG_SYNC;
	if (flags & FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	else
		xflags &= ~FS_XFLAG_NOATIME;
	if (flags & FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	else
		xflags &= ~FS_XFLAG_NODUMP;

	return xflags;
}

STATIC unsigned int
xfs_di2lxflags(
	uint16_t	di_flags)
{
	unsigned int	flags = 0;

	if (di_flags & XFS_DIFLAG_IMMUTABLE)
		flags |= FS_IMMUTABLE_FL;
	if (di_flags & XFS_DIFLAG_APPEND)
		flags |= FS_APPEND_FL;
	if (di_flags & XFS_DIFLAG_SYNC)
		flags |= FS_SYNC_FL;
	if (di_flags & XFS_DIFLAG_NOATIME)
		flags |= FS_NOATIME_FL;
	if (di_flags & XFS_DIFLAG_NODUMP)
		flags |= FS_NODUMP_FL;
	return flags;
}

static void
xfs_fill_fsxattr(
	struct xfs_inode	*ip,
	bool			attr,
	struct fsxattr		*fa)
{
	simple_fill_fsxattr(fa, xfs_ip2xflags(ip));
	fa->fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
	fa->fsx_cowextsize = ip->i_d.di_cowextsize <<
			ip->i_mount->m_sb.sb_blocklog;
	fa->fsx_projid = ip->i_d.di_projid;

	if (attr) {
		if (ip->i_afp) {
			if (ip->i_afp->if_flags & XFS_IFEXTENTS)
				fa->fsx_nextents = xfs_iext_count(ip->i_afp);
			else
				fa->fsx_nextents = ip->i_d.di_anextents;
		} else
			fa->fsx_nextents = 0;
	} else {
		if (ip->i_df.if_flags & XFS_IFEXTENTS)
			fa->fsx_nextents = xfs_iext_count(&ip->i_df);
		else
			fa->fsx_nextents = ip->i_d.di_nextents;
	}
}

STATIC int
xfs_ioc_fsgetxattr(
	xfs_inode_t		*ip,
	int			attr,
	void			__user *arg)
{
	struct fsxattr		fa;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	xfs_fill_fsxattr(ip, attr, &fa);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (copy_to_user(arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}

STATIC uint16_t
xfs_flags2diflags(
	struct xfs_inode	*ip,
	unsigned int		xflags)
{
	/* can't set PREALLOC this way, just preserve it */
	uint16_t		di_flags =
		(ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);

	if (xflags & FS_XFLAG_IMMUTABLE)
		di_flags |= XFS_DIFLAG_IMMUTABLE;
	if (xflags & FS_XFLAG_APPEND)
		di_flags |= XFS_DIFLAG_APPEND;
	if (xflags & FS_XFLAG_SYNC)
		di_flags |= XFS_DIFLAG_SYNC;
	if (xflags & FS_XFLAG_NOATIME)
		di_flags |= XFS_DIFLAG_NOATIME;
	if (xflags & FS_XFLAG_NODUMP)
		di_flags |= XFS_DIFLAG_NODUMP;
	if (xflags & FS_XFLAG_NODEFRAG)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (xflags & FS_XFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;
	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		if (xflags & FS_XFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (xflags & FS_XFLAG_NOSYMLINKS)
			di_flags |= XFS_DIFLAG_NOSYMLINKS;
		if (xflags & FS_XFLAG_EXTSZINHERIT)
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
		if (xflags & FS_XFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(VFS_I(ip)->i_mode)) {
		if (xflags & FS_XFLAG_REALTIME)
			di_flags |= XFS_DIFLAG_REALTIME;
		if (xflags & FS_XFLAG_EXTSIZE)
			di_flags |= XFS_DIFLAG_EXTSIZE;
	}

	return di_flags;
}

STATIC uint64_t
xfs_flags2diflags2(
	struct xfs_inode	*ip,
	unsigned int		xflags)
{
	uint64_t		di_flags2 =
		(ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK);

	if (xflags & FS_XFLAG_DAX)
		di_flags2 |= XFS_DIFLAG2_DAX;
	if (xflags & FS_XFLAG_COWEXTSIZE)
		di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;

	return di_flags2;
}

STATIC void
xfs_diflags_to_linux(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);
	unsigned int		xflags = xfs_ip2xflags(ip);

	if (xflags & FS_XFLAG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (xflags & FS_XFLAG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
	if (xflags & FS_XFLAG_SYNC)
		inode->i_flags |= S_SYNC;
	else
		inode->i_flags &= ~S_SYNC;
	if (xflags & FS_XFLAG_NOATIME)
		inode->i_flags |= S_NOATIME;
	else
		inode->i_flags &= ~S_NOATIME;
#if 0	/* disabled until the flag switching races are sorted out */
	if (xflags & FS_XFLAG_DAX)
		inode->i_flags |= S_DAX;
	else
		inode->i_flags &= ~S_DAX;
#endif
}

static int
xfs_ioctl_setattr_xflags(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint64_t		di_flags2;

	/* Can't change realtime flag if any extents are allocated. */
	if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
	    XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & FS_XFLAG_REALTIME))
		return -EINVAL;

	/* If realtime flag is set then must have realtime device */
	if (fa->fsx_xflags & FS_XFLAG_REALTIME) {
		if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
		    (ip->i_d.di_extsize % mp->m_sb.sb_rextsize))
			return -EINVAL;
	}

	/* Clear reflink if we are actually able to set the rt flag. */
	if ((fa->fsx_xflags & FS_XFLAG_REALTIME) && xfs_is_reflink_inode(ip))
		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;

	/* Don't allow us to set DAX mode for a reflinked file for now. */
	if ((fa->fsx_xflags & FS_XFLAG_DAX) && xfs_is_reflink_inode(ip))
		return -EINVAL;

	/* diflags2 only valid for v3 inodes. */
	di_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
	if (di_flags2 && ip->i_d.di_version < 3)
		return -EINVAL;

	ip->i_d.di_flags = xfs_flags2diflags(ip, fa->fsx_xflags);
	ip->i_d.di_flags2 = di_flags2;

	xfs_diflags_to_linux(ip);
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	XFS_STATS_INC(mp, xs_ig_attrchg);
	return 0;
}

/*
 * If we are changing DAX flags, we have to ensure the file is clean and any
 * cached objects in the address space are invalidated and removed.  This
 * requires us to lock out other IO and page faults similar to a truncate
 * operation.  The locks need to be held until the transaction has been
 * committed so that the cache invalidation is atomic with respect to the DAX
 * flag manipulation.
 */
static int
xfs_ioctl_setattr_dax_invalidate(
	struct xfs_inode	*ip,
	struct fsxattr		*fa,
	int			*join_flags)
{
	struct inode		*inode = VFS_I(ip);
	struct super_block	*sb = inode->i_sb;
	int			error;

	*join_flags = 0;

	/*
	 * It is only valid to set the DAX flag on regular files and
	 * directories on filesystems where the block size is equal to the page
	 * size.  On directories it serves as an inherited hint so we don't
	 * have to check the device for dax support or flush pagecache.
	 */
	if (fa->fsx_xflags & FS_XFLAG_DAX) {
		struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

		if (!bdev_dax_supported(target->bt_bdev, sb->s_blocksize))
			return -EINVAL;
	}

	/* If the DAX state is not changing, we have nothing to do here. */
	if ((fa->fsx_xflags & FS_XFLAG_DAX) && IS_DAX(inode))
		return 0;
	if (!(fa->fsx_xflags & FS_XFLAG_DAX) && !IS_DAX(inode))
		return 0;

	if (S_ISDIR(inode->i_mode))
		return 0;

	/* lock, flush and invalidate mapping in preparation for flag change */
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
	error = filemap_write_and_wait(inode->i_mapping);
	if (error)
		goto out_unlock;
	error = invalidate_inode_pages2(inode->i_mapping);
	if (error)
		goto out_unlock;

	*join_flags = XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL;
	return 0;

out_unlock:
	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
	return error;

}
1232
1233/*
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001234 * Set up the transaction structure for the setattr operation, checking that we
1235 * have permission to do so. On success, return a clean transaction and the
1236 * inode locked exclusively ready for further operation specific checks. On
1237 * failure, return an error without modifying or locking the inode.
Dave Chinner3a6a8542016-03-01 09:41:33 +11001238 *
 1239 * The inode might already be IO locked on call. If this is the case, it is
 1240 * indicated in @join_flags and we take full responsibility for ensuring those
 1241 * locks are unlocked from now on. Hence if we have an error here, we still
 1242 * have to unlock them. Otherwise, once they are joined to the transaction,
 1243 * they will be unlocked on commit/cancel.
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001244 */
1245static struct xfs_trans *
1246xfs_ioctl_setattr_get_trans(
Dave Chinner3a6a8542016-03-01 09:41:33 +11001247 struct xfs_inode *ip,
1248 int join_flags)
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001249{
1250 struct xfs_mount *mp = ip->i_mount;
1251 struct xfs_trans *tp;
Dave Chinner3a6a8542016-03-01 09:41:33 +11001252 int error = -EROFS;
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001253
1254 if (mp->m_flags & XFS_MOUNT_RDONLY)
Dave Chinner3a6a8542016-03-01 09:41:33 +11001255 goto out_unlock;
1256 error = -EIO;
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001257 if (XFS_FORCED_SHUTDOWN(mp))
Dave Chinner3a6a8542016-03-01 09:41:33 +11001258 goto out_unlock;
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001259
Christoph Hellwig253f4912016-04-06 09:19:55 +10001260 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001261 if (error)
Darrick J. Wong3de5eab2019-04-22 16:28:34 -07001262 goto out_unlock;
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001263
1264 xfs_ilock(ip, XFS_ILOCK_EXCL);
Dave Chinner3a6a8542016-03-01 09:41:33 +11001265 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | join_flags);
1266 join_flags = 0;
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001267
1268 /*
1269 * CAP_FOWNER overrides the following restrictions:
1270 *
1271 * The user ID of the calling process must be equal to the file owner
1272 * ID, except in cases where the CAP_FSETID capability is applicable.
1273 */
1274 if (!inode_owner_or_capable(VFS_I(ip))) {
1275 error = -EPERM;
1276 goto out_cancel;
1277 }
1278
1279 if (mp->m_flags & XFS_MOUNT_WSYNC)
1280 xfs_trans_set_sync(tp);
1281
1282 return tp;
1283
1284out_cancel:
Christoph Hellwig4906e212015-06-04 13:47:56 +10001285 xfs_trans_cancel(tp);
Dave Chinner3a6a8542016-03-01 09:41:33 +11001286out_unlock:
1287 if (join_flags)
1288 xfs_iunlock(ip, join_flags);
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001289 return ERR_PTR(error);
1290}
1291
Iustin Pop9b94fcc2015-02-02 10:26:26 +11001292/*
 1293 * Extent size hint validation is somewhat cumbersome. The rules are:
1294 *
1295 * 1. extent size hint is only valid for directories and regular files
Dave Chinnere7b89482016-01-04 16:44:15 +11001296 * 2. FS_XFLAG_EXTSIZE is only valid for regular files
1297 * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
Iustin Pop9b94fcc2015-02-02 10:26:26 +11001298 * 4. can only be changed on regular files if no extents are allocated
1299 * 5. can be changed on directories at any time
1300 * 6. extsize hint of 0 turns off hints, clears inode flags.
1301 * 7. Extent size must be a multiple of the appropriate block size.
1302 * 8. for non-realtime files, the extent size hint must be limited
1303 * to half the AG size to avoid alignment extending the extent beyond the
1304 * limits of the AG.
Darrick J. Wong80e4e122017-10-17 21:37:42 -07001305 *
1306 * Please keep this function in sync with xfs_scrub_inode_extsize.
Iustin Pop9b94fcc2015-02-02 10:26:26 +11001307 */
kbuild test robotf92090e2015-02-05 11:13:21 +11001308static int
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001309xfs_ioctl_setattr_check_extsize(
1310 struct xfs_inode *ip,
1311 struct fsxattr *fa)
1312{
1313 struct xfs_mount *mp = ip->i_mount;
Darrick J. Wongca29be72019-07-01 08:25:36 -07001314 xfs_extlen_t size;
1315 xfs_fsblock_t extsize_fsb;
Iustin Pop9b94fcc2015-02-02 10:26:26 +11001316
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001317 if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_d.di_nextents &&
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001318 ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
1319 return -EINVAL;
1320
Darrick J. Wongca29be72019-07-01 08:25:36 -07001321 if (fa->fsx_extsize == 0)
1322 return 0;
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001323
Darrick J. Wongca29be72019-07-01 08:25:36 -07001324 extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
1325 if (extsize_fsb > MAXEXTLEN)
1326 return -EINVAL;
1327
1328 if (XFS_IS_REALTIME_INODE(ip) ||
1329 (fa->fsx_xflags & FS_XFLAG_REALTIME)) {
1330 size = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
1331 } else {
1332 size = mp->m_sb.sb_blocksize;
1333 if (extsize_fsb > mp->m_sb.sb_agblocks / 2)
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001334 return -EINVAL;
Darrick J. Wongca29be72019-07-01 08:25:36 -07001335 }
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001336
Darrick J. Wongca29be72019-07-01 08:25:36 -07001337 if (fa->fsx_extsize % size)
1338 return -EINVAL;
Iustin Pop9b94fcc2015-02-02 10:26:26 +11001339
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001340 return 0;
1341}
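/*
 * Illustrative userspace sketch (not part of the original source): one
 * plausible way to request an extent size hint that satisfies the rules
 * checked above, using the generic FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR
 * interface from <linux/fs.h>. The helper name and the byte count passed in
 * are made up for the example.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/fs.h>

static int set_extsize_hint(int fd, __u32 bytes)
{
	struct fsxattr fa;

	if (ioctl(fd, FS_IOC_FSGETXATTR, &fa) < 0)
		return -1;
	fa.fsx_xflags |= FS_XFLAG_EXTSIZE;	/* regular files; dirs use EXTSZINHERIT */
	fa.fsx_extsize = bytes;			/* multiple of the fs block size */
	return ioctl(fd, FS_IOC_FSSETXATTR, &fa);
}
#endif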
1342
Darrick J. Wongf7ca3522016-10-03 09:11:43 -07001343/*
1344 * CoW extent size hint validation rules are:
1345 *
1346 * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
 1347 * The inode does not have to have any shared blocks, but it must be a v3 inode.
1348 * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
1349 * for a directory, the hint is propagated to new files.
1350 * 3. Can be changed on files & directories at any time.
1351 * 4. CoW extsize hint of 0 turns off hints, clears inode flags.
1352 * 5. Extent size must be a multiple of the appropriate block size.
1353 * 6. The extent size hint must be limited to half the AG size to avoid
1354 * alignment extending the extent beyond the limits of the AG.
Darrick J. Wong80e4e122017-10-17 21:37:42 -07001355 *
1356 * Please keep this function in sync with xfs_scrub_inode_cowextsize.
Darrick J. Wongf7ca3522016-10-03 09:11:43 -07001357 */
1358static int
1359xfs_ioctl_setattr_check_cowextsize(
1360 struct xfs_inode *ip,
1361 struct fsxattr *fa)
1362{
1363 struct xfs_mount *mp = ip->i_mount;
Darrick J. Wongca29be72019-07-01 08:25:36 -07001364 xfs_extlen_t size;
1365 xfs_fsblock_t cowextsize_fsb;
Darrick J. Wongf7ca3522016-10-03 09:11:43 -07001366
1367 if (!(fa->fsx_xflags & FS_XFLAG_COWEXTSIZE))
1368 return 0;
1369
1370 if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb) ||
1371 ip->i_d.di_version != 3)
1372 return -EINVAL;
1373
Darrick J. Wongca29be72019-07-01 08:25:36 -07001374 if (fa->fsx_cowextsize == 0)
1375 return 0;
1376
1377 cowextsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
1378 if (cowextsize_fsb > MAXEXTLEN)
Darrick J. Wongf7ca3522016-10-03 09:11:43 -07001379 return -EINVAL;
1380
Darrick J. Wongca29be72019-07-01 08:25:36 -07001381 size = mp->m_sb.sb_blocksize;
1382 if (cowextsize_fsb > mp->m_sb.sb_agblocks / 2)
1383 return -EINVAL;
Darrick J. Wongf7ca3522016-10-03 09:11:43 -07001384
Darrick J. Wongca29be72019-07-01 08:25:36 -07001385 if (fa->fsx_cowextsize % size)
1386 return -EINVAL;
Darrick J. Wongf7ca3522016-10-03 09:11:43 -07001387
1388 return 0;
1389}
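/*
 * Illustrative userspace sketch (not part of the original source, and
 * assuming the same <sys/ioctl.h> and <linux/fs.h> includes as the sketch
 * above): the CoW extent size hint is set through the same fsxattr
 * interface on a reflink-capable filesystem, and fsx_cowextsize must obey
 * the rules listed above.
 */
#if 0
static int set_cowextsize_hint(int fd, __u32 bytes)
{
	struct fsxattr fa;

	if (ioctl(fd, FS_IOC_FSGETXATTR, &fa) < 0)
		return -1;
	fa.fsx_xflags |= FS_XFLAG_COWEXTSIZE;
	fa.fsx_cowextsize = bytes;		/* multiple of the fs block size */
	return ioctl(fd, FS_IOC_FSSETXATTR, &fa);
}
#endif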
1390
kbuild test robotf92090e2015-02-05 11:13:21 +11001391static int
Dave Chinner23bd0732015-02-02 10:22:53 +11001392xfs_ioctl_setattr_check_projid(
1393 struct xfs_inode *ip,
1394 struct fsxattr *fa)
1395{
1396 /* Disallow 32bit project ids if projid32bit feature is not enabled. */
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001397 if (fa->fsx_projid > (uint16_t)-1 &&
Dave Chinner23bd0732015-02-02 10:22:53 +11001398 !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
1399 return -EINVAL;
Dave Chinner23bd0732015-02-02 10:22:53 +11001400 return 0;
1401}
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001402
1403STATIC int
1404xfs_ioctl_setattr(
1405 xfs_inode_t *ip,
Dave Chinnerfd179b92015-02-02 10:16:25 +11001406 struct fsxattr *fa)
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001407{
Darrick J. Wong7b0e4922019-07-01 08:25:35 -07001408 struct fsxattr old_fa;
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001409 struct xfs_mount *mp = ip->i_mount;
1410 struct xfs_trans *tp;
Christoph Hellwig7d095252009-06-08 15:33:32 +02001411 struct xfs_dquot *udqp = NULL;
Chandra Seetharaman92f8ff72013-07-11 00:00:40 -05001412 struct xfs_dquot *pdqp = NULL;
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001413 struct xfs_dquot *olddquot = NULL;
1414 int code;
Dave Chinner3a6a8542016-03-01 09:41:33 +11001415 int join_flags = 0;
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001416
Christoph Hellwigcca28fb2010-06-24 11:57:09 +10001417 trace_xfs_ioctl_setattr(ip);
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001418
Dave Chinner23bd0732015-02-02 10:22:53 +11001419 code = xfs_ioctl_setattr_check_projid(ip, fa);
1420 if (code)
1421 return code;
Arkadiusz Mi?kiewicz23963e542010-08-26 10:19:43 +00001422
1423 /*
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001424 * If disk quotas are on, we make sure that the dquots do exist on disk,
1425 * before we start any other transactions. Trying to do this later
1426 * is messy. We don't care to take a readlock to look at the ids
 1427 * in the inode here, because we can't hold it across the trans_reserve.
1428 * If the IDs do change before we take the ilock, we're covered
1429 * because the i_*dquot fields will get updated anyway.
1430 */
Dave Chinnerfd179b92015-02-02 10:16:25 +11001431 if (XFS_IS_QUOTA_ON(mp)) {
Christoph Hellwig54295152020-02-21 08:31:27 -08001432 code = xfs_qm_vop_dqalloc(ip, VFS_I(ip)->i_uid,
1433 VFS_I(ip)->i_gid, fa->fsx_projid,
1434 XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001435 if (code)
1436 return code;
1437 }
1438
Dave Chinner3a6a8542016-03-01 09:41:33 +11001439 /*
 1440 * Changing the DAX config may require taking inode locks for mapping
 1441 * invalidation. Those locks need to be held all the way to transaction
 1442 * commit or cancel time, so they need to be passed through to
 1443 * xfs_ioctl_setattr_get_trans(), which applies them to the join call
 1444 * appropriately.
1445 */
1446 code = xfs_ioctl_setattr_dax_invalidate(ip, fa, &join_flags);
1447 if (code)
1448 goto error_free_dquots;
1449
1450 tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001451 if (IS_ERR(tp)) {
1452 code = PTR_ERR(tp);
1453 goto error_free_dquots;
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001454 }
1455
Dave Chinnerfd179b92015-02-02 10:16:25 +11001456 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) &&
Christoph Hellwigde7a8662019-11-12 08:22:54 -08001457 ip->i_d.di_projid != fa->fsx_projid) {
Dave Chinnerfd179b92015-02-02 10:16:25 +11001458 code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL, pdqp,
1459 capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0);
1460 if (code) /* out of quota */
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001461 goto error_trans_cancel;
Dave Chinnerfd179b92015-02-02 10:16:25 +11001462 }
1463
Darrick J. Wong7b0e4922019-07-01 08:25:35 -07001464 xfs_fill_fsxattr(ip, false, &old_fa);
1465 code = vfs_ioc_fssetxattr_check(VFS_I(ip), &old_fa, fa);
1466 if (code)
1467 goto error_trans_cancel;
1468
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001469 code = xfs_ioctl_setattr_check_extsize(ip, fa);
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001470 if (code)
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001471 goto error_trans_cancel;
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001472
Darrick J. Wongf7ca3522016-10-03 09:11:43 -07001473 code = xfs_ioctl_setattr_check_cowextsize(ip, fa);
1474 if (code)
1475 goto error_trans_cancel;
1476
Dave Chinner29a17c02015-02-02 10:14:25 +11001477 code = xfs_ioctl_setattr_xflags(tp, ip, fa);
1478 if (code)
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001479 goto error_trans_cancel;
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001480
1481 /*
Dave Chinnerfd179b92015-02-02 10:16:25 +11001482 * Change file ownership. Must be the owner or privileged. CAP_FSETID
1483 * overrides the following restrictions:
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001484 *
Dave Chinnerfd179b92015-02-02 10:16:25 +11001485 * The set-user-ID and set-group-ID bits of a file will be cleared upon
1486 * successful return from chown()
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001487 */
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001488
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001489 if ((VFS_I(ip)->i_mode & (S_ISUID|S_ISGID)) &&
Dave Chinnerfd179b92015-02-02 10:16:25 +11001490 !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001491 VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);
Dave Chinnerfd179b92015-02-02 10:16:25 +11001492
1493 /* Change the ownerships and register project quota modifications */
Christoph Hellwigde7a8662019-11-12 08:22:54 -08001494 if (ip->i_d.di_projid != fa->fsx_projid) {
Dave Chinnerfd179b92015-02-02 10:16:25 +11001495 if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
1496 olddquot = xfs_qm_vop_chown(tp, ip,
1497 &ip->i_pdquot, pdqp);
Dwight Engenfd5e2aa2013-08-15 14:08:00 -04001498 }
Dave Chinnerfd179b92015-02-02 10:16:25 +11001499 ASSERT(ip->i_d.di_version > 1);
Christoph Hellwigde7a8662019-11-12 08:22:54 -08001500 ip->i_d.di_projid = fa->fsx_projid;
Christoph Hellwigf13fae22008-07-21 16:16:15 +10001501 }
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001502
Dave Chinnera8727032014-10-02 09:20:30 +10001503 /*
1504 * Only set the extent size hint if we've already determined that the
1505 * extent size hint should be set on the inode. If no extent size flags
1506 * are set on the inode then unconditionally clear the extent size hint.
1507 */
Dave Chinnerfd179b92015-02-02 10:16:25 +11001508 if (ip->i_d.di_flags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
1509 ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
1510 else
1511 ip->i_d.di_extsize = 0;
Darrick J. Wongf7ca3522016-10-03 09:11:43 -07001512 if (ip->i_d.di_version == 3 &&
1513 (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
1514 ip->i_d.di_cowextsize = fa->fsx_cowextsize >>
1515 mp->m_sb.sb_blocklog;
1516 else
1517 ip->i_d.di_cowextsize = 0;
Dave Chinnera8727032014-10-02 09:20:30 +10001518
Christoph Hellwig70393312015-06-04 13:48:08 +10001519 code = xfs_trans_commit(tp);
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001520
1521 /*
1522 * Release any dquot(s) the inode had kept before chown.
1523 */
Christoph Hellwig7d095252009-06-08 15:33:32 +02001524 xfs_qm_dqrele(olddquot);
1525 xfs_qm_dqrele(udqp);
Chandra Seetharaman92f8ff72013-07-11 00:00:40 -05001526 xfs_qm_dqrele(pdqp);
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001527
Christoph Hellwig288699f2010-06-23 18:11:15 +10001528 return code;
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001529
Dave Chinnerd4388d3c2015-02-02 10:22:20 +11001530error_trans_cancel:
Christoph Hellwig4906e212015-06-04 13:47:56 +10001531 xfs_trans_cancel(tp);
Dave Chinner8f3d17a2015-02-02 10:15:35 +11001532error_free_dquots:
Christoph Hellwig7d095252009-06-08 15:33:32 +02001533 xfs_qm_dqrele(udqp);
Chandra Seetharaman92f8ff72013-07-11 00:00:40 -05001534 xfs_qm_dqrele(pdqp);
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001535 return code;
1536}
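/*
 * Illustrative userspace sketch (not part of the original source): project
 * IDs are assigned through the same fsxattr interface, typically on a
 * directory together with FS_XFLAG_PROJINHERIT so that new children inherit
 * the project (this is roughly what xfs_quota's project setup does). The
 * helper name is made up for the example.
 */
#if 0
static int set_project_id(int dirfd, __u32 prid)
{
	struct fsxattr fa;

	if (ioctl(dirfd, FS_IOC_FSGETXATTR, &fa) < 0)
		return -1;
	fa.fsx_projid = prid;
	fa.fsx_xflags |= FS_XFLAG_PROJINHERIT;
	return ioctl(dirfd, FS_IOC_FSSETXATTR, &fa);
}
#endif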
1537
Christoph Hellwigc83bfab2007-10-11 17:47:00 +10001538STATIC int
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001539xfs_ioc_fssetxattr(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540 xfs_inode_t *ip,
1541 struct file *filp,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542 void __user *arg)
1543{
1544 struct fsxattr fa;
Jan Karad9457dc2012-06-12 16:20:39 +02001545 int error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001546
1547 if (copy_from_user(&fa, arg, sizeof(fa)))
1548 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549
Jan Karad9457dc2012-06-12 16:20:39 +02001550 error = mnt_want_write_file(filp);
1551 if (error)
1552 return error;
Dave Chinnerfd179b92015-02-02 10:16:25 +11001553 error = xfs_ioctl_setattr(ip, &fa);
Jan Karad9457dc2012-06-12 16:20:39 +02001554 mnt_drop_write_file(filp);
Dave Chinner24513372014-06-25 14:58:08 +10001555 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001556}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001558STATIC int
1559xfs_ioc_getxflags(
1560 xfs_inode_t *ip,
1561 void __user *arg)
1562{
1563 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001565 flags = xfs_di2lxflags(ip->i_d.di_flags);
1566 if (copy_to_user(arg, &flags, sizeof(flags)))
1567 return -EFAULT;
1568 return 0;
1569}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001571STATIC int
1572xfs_ioc_setxflags(
Dave Chinnerf96291f2015-02-02 10:15:56 +11001573 struct xfs_inode *ip,
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001574 struct file *filp,
1575 void __user *arg)
1576{
Dave Chinnerf96291f2015-02-02 10:15:56 +11001577 struct xfs_trans *tp;
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001578 struct fsxattr fa;
Darrick J. Wong7b0e4922019-07-01 08:25:35 -07001579 struct fsxattr old_fa;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001580 unsigned int flags;
Dave Chinner3a6a8542016-03-01 09:41:33 +11001581 int join_flags = 0;
Dave Chinnerf96291f2015-02-02 10:15:56 +11001582 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001584 if (copy_from_user(&flags, arg, sizeof(flags)))
1585 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001587 if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
1588 FS_NOATIME_FL | FS_NODUMP_FL | \
1589 FS_SYNC_FL))
1590 return -EOPNOTSUPP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591
Christoph Hellwig25fe55e2008-07-18 17:13:20 +10001592 fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593
Jan Karad9457dc2012-06-12 16:20:39 +02001594 error = mnt_want_write_file(filp);
1595 if (error)
1596 return error;
Dave Chinnerf96291f2015-02-02 10:15:56 +11001597
Dave Chinner3a6a8542016-03-01 09:41:33 +11001598 /*
 1599 * Changing the DAX config may require taking inode locks for mapping
 1600 * invalidation. Those locks need to be held all the way to transaction
 1601 * commit or cancel time, so they need to be passed through to
 1602 * xfs_ioctl_setattr_get_trans(), which applies them to the join call
 1603 * appropriately.
1604 */
1605 error = xfs_ioctl_setattr_dax_invalidate(ip, &fa, &join_flags);
1606 if (error)
1607 goto out_drop_write;
1608
1609 tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
Dave Chinnerf96291f2015-02-02 10:15:56 +11001610 if (IS_ERR(tp)) {
1611 error = PTR_ERR(tp);
1612 goto out_drop_write;
1613 }
1614
Darrick J. Wong7b0e4922019-07-01 08:25:35 -07001615 xfs_fill_fsxattr(ip, false, &old_fa);
1616 error = vfs_ioc_fssetxattr_check(VFS_I(ip), &old_fa, &fa);
1617 if (error) {
1618 xfs_trans_cancel(tp);
1619 goto out_drop_write;
1620 }
1621
Dave Chinnerf96291f2015-02-02 10:15:56 +11001622 error = xfs_ioctl_setattr_xflags(tp, ip, &fa);
1623 if (error) {
Christoph Hellwig4906e212015-06-04 13:47:56 +10001624 xfs_trans_cancel(tp);
Dave Chinnerf96291f2015-02-02 10:15:56 +11001625 goto out_drop_write;
1626 }
1627
Christoph Hellwig70393312015-06-04 13:48:08 +10001628 error = xfs_trans_commit(tp);
Dave Chinnerf96291f2015-02-02 10:15:56 +11001629out_drop_write:
Jan Karad9457dc2012-06-12 16:20:39 +02001630 mnt_drop_write_file(filp);
Dave Chinner24513372014-06-25 14:58:08 +10001631 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632}
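/*
 * Illustrative userspace sketch (not part of the original source):
 * XFS_IOC_GETXFLAGS/SETXFLAGS are reached from userspace via the generic
 * chattr(1)-style FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls; only the flags
 * accepted above (IMMUTABLE, APPEND, NOATIME, NODUMP, SYNC) are honoured.
 * The helper name is made up for the example.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/fs.h>

static int set_noatime_attr(int fd)
{
	unsigned int flags;

	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
		return -1;
	flags |= FS_NOATIME_FL;
	return ioctl(fd, FS_IOC_SETFLAGS, &flags);
}
#endif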
1633
Christoph Hellwig232b51942017-10-17 14:16:19 -07001634static bool
1635xfs_getbmap_format(
1636 struct kgetbmap *p,
1637 struct getbmapx __user *u,
1638 size_t recsize)
Eric Sandeen8a7141a2008-11-28 14:23:35 +11001639{
Christoph Hellwig232b51942017-10-17 14:16:19 -07001640 if (put_user(p->bmv_offset, &u->bmv_offset) ||
1641 put_user(p->bmv_block, &u->bmv_block) ||
1642 put_user(p->bmv_length, &u->bmv_length) ||
1643 put_user(0, &u->bmv_count) ||
1644 put_user(0, &u->bmv_entries))
1645 return false;
1646 if (recsize < sizeof(struct getbmapx))
1647 return true;
1648 if (put_user(0, &u->bmv_iflags) ||
1649 put_user(p->bmv_oflags, &u->bmv_oflags) ||
1650 put_user(0, &u->bmv_unused1) ||
1651 put_user(0, &u->bmv_unused2))
1652 return false;
1653 return true;
Eric Sandeen8a7141a2008-11-28 14:23:35 +11001654}
1655
1656STATIC int
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657xfs_ioc_getbmap(
Christoph Hellwig8f3e2052016-07-20 11:29:35 +10001658 struct file *file,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659 unsigned int cmd,
1660 void __user *arg)
1661{
Darrick J. Wongbe6324c2017-04-03 15:17:57 -07001662 struct getbmapx bmx = { 0 };
Christoph Hellwig232b51942017-10-17 14:16:19 -07001663 struct kgetbmap *buf;
1664 size_t recsize;
1665 int error, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666
Christoph Hellwig232b51942017-10-17 14:16:19 -07001667 switch (cmd) {
1668 case XFS_IOC_GETBMAPA:
1669 bmx.bmv_iflags = BMV_IF_ATTRFORK;
1670 /*FALLTHRU*/
1671 case XFS_IOC_GETBMAP:
1672 if (file->f_mode & FMODE_NOCMTIME)
1673 bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ;
1674 /* struct getbmap is a strict subset of struct getbmapx. */
1675 recsize = sizeof(struct getbmap);
1676 break;
1677 case XFS_IOC_GETBMAPX:
1678 recsize = sizeof(struct getbmapx);
1679 break;
1680 default:
1681 return -EINVAL;
1682 }
1683
1684 if (copy_from_user(&bmx, arg, recsize))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10001685 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686
Eric Sandeen8a7141a2008-11-28 14:23:35 +11001687 if (bmx.bmv_count < 2)
Eric Sandeenb474c7a2014-06-22 15:04:54 +10001688 return -EINVAL;
Christoph Hellwig232b51942017-10-17 14:16:19 -07001689 if (bmx.bmv_count > ULONG_MAX / recsize)
1690 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691
Christoph Hellwig232b51942017-10-17 14:16:19 -07001692 buf = kmem_zalloc_large(bmx.bmv_count * sizeof(*buf), 0);
1693 if (!buf)
1694 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695
Christoph Hellwig232b51942017-10-17 14:16:19 -07001696 error = xfs_getbmap(XFS_I(file_inode(file)), &bmx, buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 if (error)
Christoph Hellwig232b51942017-10-17 14:16:19 -07001698 goto out_free_buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699
Christoph Hellwig232b51942017-10-17 14:16:19 -07001700 error = -EFAULT;
1701 if (copy_to_user(arg, &bmx, recsize))
1702 goto out_free_buf;
1703 arg += recsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704
Christoph Hellwig232b51942017-10-17 14:16:19 -07001705 for (i = 0; i < bmx.bmv_entries; i++) {
1706 if (!xfs_getbmap_format(buf + i, arg, recsize))
1707 goto out_free_buf;
1708 arg += recsize;
1709 }
Eric Sandeen8a7141a2008-11-28 14:23:35 +11001710
Christoph Hellwig232b51942017-10-17 14:16:19 -07001711 error = 0;
1712out_free_buf:
1713 kmem_free(buf);
Christophe JAILLET132bf672018-11-06 07:50:50 -08001714 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715}
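/*
 * Illustrative userspace sketch (not part of the original source): the
 * classic GETBMAPX calling convention, assuming the xfsprogs headers are
 * available. The first array element describes the request (offset, length
 * and slot count, in 512-byte units); the remaining elements are filled
 * with extent records. The helper name and array size are made up.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>

static int print_bmapx(int fd)
{
	struct getbmapx map[33];	/* header + up to 32 records */
	int i;

	memset(map, 0, sizeof(map));
	map[0].bmv_length = -1;		/* map the whole file */
	map[0].bmv_count = 33;
	if (ioctl(fd, XFS_IOC_GETBMAPX, map) < 0)
		return -1;
	for (i = 1; i <= map[0].bmv_entries; i++)
		printf("%lld..%lld: block %lld\n",
		       (long long)map[i].bmv_offset,
		       (long long)(map[i].bmv_offset + map[i].bmv_length - 1),
		       (long long)map[i].bmv_block);
	return 0;
}
#endif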
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001716
Darrick J. Wonge89c0412017-03-28 14:56:37 -07001717struct getfsmap_info {
1718 struct xfs_mount *mp;
Christoph Hellwig9d17e142017-04-21 11:24:41 -07001719 struct fsmap_head __user *data;
1720 unsigned int idx;
Darrick J. Wonge89c0412017-03-28 14:56:37 -07001721 __u32 last_flags;
1722};
1723
1724STATIC int
1725xfs_getfsmap_format(struct xfs_fsmap *xfm, void *priv)
1726{
1727 struct getfsmap_info *info = priv;
1728 struct fsmap fm;
1729
1730 trace_xfs_getfsmap_mapping(info->mp, xfm);
1731
1732 info->last_flags = xfm->fmr_flags;
1733 xfs_fsmap_from_internal(&fm, xfm);
Christoph Hellwig9d17e142017-04-21 11:24:41 -07001734 if (copy_to_user(&info->data->fmh_recs[info->idx++], &fm,
1735 sizeof(struct fsmap)))
Darrick J. Wonge89c0412017-03-28 14:56:37 -07001736 return -EFAULT;
1737
Darrick J. Wonge89c0412017-03-28 14:56:37 -07001738 return 0;
1739}
1740
1741STATIC int
1742xfs_ioc_getfsmap(
1743 struct xfs_inode *ip,
Christoph Hellwig9d17e142017-04-21 11:24:41 -07001744 struct fsmap_head __user *arg)
Darrick J. Wonge89c0412017-03-28 14:56:37 -07001745{
Christoph Hellwigef2b67e2017-04-21 11:24:40 -07001746 struct getfsmap_info info = { NULL };
Darrick J. Wonge89c0412017-03-28 14:56:37 -07001747 struct xfs_fsmap_head xhead = {0};
1748 struct fsmap_head head;
1749 bool aborted = false;
1750 int error;
1751
1752 if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
1753 return -EFAULT;
1754 if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
1755 memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
1756 sizeof(head.fmh_keys[0].fmr_reserved)) ||
1757 memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
1758 sizeof(head.fmh_keys[1].fmr_reserved)))
1759 return -EINVAL;
1760
1761 xhead.fmh_iflags = head.fmh_iflags;
1762 xhead.fmh_count = head.fmh_count;
1763 xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
1764 xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);
1765
1766 trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
1767 trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);
1768
1769 info.mp = ip->i_mount;
Christoph Hellwig9d17e142017-04-21 11:24:41 -07001770 info.data = arg;
Darrick J. Wonge89c0412017-03-28 14:56:37 -07001771 error = xfs_getfsmap(ip->i_mount, &xhead, xfs_getfsmap_format, &info);
Darrick J. Wonge7ee96d2019-08-28 14:37:57 -07001772 if (error == -ECANCELED) {
Darrick J. Wonge89c0412017-03-28 14:56:37 -07001773 error = 0;
1774 aborted = true;
1775 } else if (error)
1776 return error;
1777
1778 /* If we didn't abort, set the "last" flag in the last fmx */
Darrick J. Wong12e4a382017-04-23 10:45:21 -07001779 if (!aborted && info.idx) {
Darrick J. Wonge89c0412017-03-28 14:56:37 -07001780 info.last_flags |= FMR_OF_LAST;
Christoph Hellwig9d17e142017-04-21 11:24:41 -07001781 if (copy_to_user(&info.data->fmh_recs[info.idx - 1].fmr_flags,
1782 &info.last_flags, sizeof(info.last_flags)))
Darrick J. Wonge89c0412017-03-28 14:56:37 -07001783 return -EFAULT;
1784 }
1785
1786 /* copy back header */
1787 head.fmh_entries = xhead.fmh_entries;
1788 head.fmh_oflags = xhead.fmh_oflags;
1789 if (copy_to_user(arg, &head, sizeof(struct fsmap_head)))
1790 return -EFAULT;
1791
1792 return 0;
1793}
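/*
 * Illustrative userspace sketch (not part of the original source): a minimal
 * FS_IOC_GETFSMAP query using <linux/fsmap.h>. The low key is left zeroed and
 * the high key set to "everything", so up to 128 mapping records for the
 * whole filesystem come back in fmh_recs[]. The helper name and record count
 * are made up for the example.
 */
#if 0
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fsmap.h>

static struct fsmap_head *get_fsmap(int fd)
{
	size_t sz = sizeof(struct fsmap_head) + 128 * sizeof(struct fsmap);
	struct fsmap_head *head = calloc(1, sz);

	if (!head)
		return NULL;
	head->fmh_count = 128;
	head->fmh_keys[1].fmr_device = ~0U;	/* high key: match everything */
	head->fmh_keys[1].fmr_flags = ~0U;
	head->fmh_keys[1].fmr_physical = ~0ULL;
	head->fmh_keys[1].fmr_owner = ~0ULL;
	head->fmh_keys[1].fmr_offset = ~0ULL;
	if (ioctl(fd, FS_IOC_GETFSMAP, head) < 0) {
		free(head);
		return NULL;
	}
	return head;	/* head->fmh_entries valid records follow */
}
#endif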
1794
Darrick J. Wong36fd6e82017-10-17 21:37:34 -07001795STATIC int
1796xfs_ioc_scrub_metadata(
1797 struct xfs_inode *ip,
1798 void __user *arg)
1799{
1800 struct xfs_scrub_metadata scrub;
1801 int error;
1802
1803 if (!capable(CAP_SYS_ADMIN))
1804 return -EPERM;
1805
1806 if (copy_from_user(&scrub, arg, sizeof(scrub)))
1807 return -EFAULT;
1808
1809 error = xfs_scrub_metadata(ip, &scrub);
1810 if (error)
1811 return error;
1812
1813 if (copy_to_user(arg, &scrub, sizeof(scrub)))
1814 return -EFAULT;
1815
1816 return 0;
1817}
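/*
 * Usage note (added, not from the original source): this is the online scrub
 * entry point driven by the xfs_scrub(8) userspace tool. The caller fills in
 * struct xfs_scrub_metadata to select which metadata object to check
 * (sm_type plus an AG number or inode as appropriate), and problems are
 * reported back through the structure's flags field.
 */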
1818
Dave Chinnera133d952013-08-12 20:49:48 +10001819int
1820xfs_ioc_swapext(
1821 xfs_swapext_t *sxp)
1822{
1823 xfs_inode_t *ip, *tip;
1824 struct fd f, tmp;
1825 int error = 0;
1826
1827 /* Pull information for the target fd */
1828 f = fdget((int)sxp->sx_fdtarget);
1829 if (!f.file) {
Dave Chinner24513372014-06-25 14:58:08 +10001830 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001831 goto out;
1832 }
1833
1834 if (!(f.file->f_mode & FMODE_WRITE) ||
1835 !(f.file->f_mode & FMODE_READ) ||
1836 (f.file->f_flags & O_APPEND)) {
Dave Chinner24513372014-06-25 14:58:08 +10001837 error = -EBADF;
Dave Chinnera133d952013-08-12 20:49:48 +10001838 goto out_put_file;
1839 }
1840
1841 tmp = fdget((int)sxp->sx_fdtmp);
1842 if (!tmp.file) {
Dave Chinner24513372014-06-25 14:58:08 +10001843 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001844 goto out_put_file;
1845 }
1846
1847 if (!(tmp.file->f_mode & FMODE_WRITE) ||
1848 !(tmp.file->f_mode & FMODE_READ) ||
1849 (tmp.file->f_flags & O_APPEND)) {
Dave Chinner24513372014-06-25 14:58:08 +10001850 error = -EBADF;
Dave Chinnera133d952013-08-12 20:49:48 +10001851 goto out_put_tmp_file;
1852 }
1853
1854 if (IS_SWAPFILE(file_inode(f.file)) ||
1855 IS_SWAPFILE(file_inode(tmp.file))) {
Dave Chinner24513372014-06-25 14:58:08 +10001856 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001857 goto out_put_tmp_file;
1858 }
1859
Jann Horn7f1b6242016-07-20 10:30:30 +10001860 /*
1861 * We need to ensure that the fds passed in point to XFS inodes
 1862 * before we cast and access them as XFS structures, since we have no
 1863 * control over what the user passes us here.
1864 */
1865 if (f.file->f_op != &xfs_file_operations ||
1866 tmp.file->f_op != &xfs_file_operations) {
1867 error = -EINVAL;
1868 goto out_put_tmp_file;
1869 }
1870
Dave Chinnera133d952013-08-12 20:49:48 +10001871 ip = XFS_I(file_inode(f.file));
1872 tip = XFS_I(file_inode(tmp.file));
1873
1874 if (ip->i_mount != tip->i_mount) {
Dave Chinner24513372014-06-25 14:58:08 +10001875 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001876 goto out_put_tmp_file;
1877 }
1878
1879 if (ip->i_ino == tip->i_ino) {
Dave Chinner24513372014-06-25 14:58:08 +10001880 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001881 goto out_put_tmp_file;
1882 }
1883
1884 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
Dave Chinner24513372014-06-25 14:58:08 +10001885 error = -EIO;
Dave Chinnera133d952013-08-12 20:49:48 +10001886 goto out_put_tmp_file;
1887 }
1888
1889 error = xfs_swap_extents(ip, tip, sxp);
1890
1891 out_put_tmp_file:
1892 fdput(tmp);
1893 out_put_file:
1894 fdput(f);
1895 out:
1896 return error;
1897}
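/*
 * Usage note (added, not from the original source): XFS_IOC_SWAPEXT is the
 * interface behind the xfs_fsr(8) defragmenter, which rewrites a fragmented
 * file into a donor file and then swaps the two inodes' extent forks here.
 */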
1898
Eric Sandeenf7664b32018-05-15 13:21:48 -07001899static int
1900xfs_ioc_getlabel(
1901 struct xfs_mount *mp,
1902 char __user *user_label)
1903{
1904 struct xfs_sb *sbp = &mp->m_sb;
1905 char label[XFSLABEL_MAX + 1];
1906
1907 /* Paranoia */
1908 BUILD_BUG_ON(sizeof(sbp->sb_fname) > FSLABEL_MAX);
1909
Arnd Bergmann4bb8b652018-06-05 19:42:45 -07001910 /* 1 larger than sb_fname, so this ensures a trailing NUL char */
1911 memset(label, 0, sizeof(label));
Eric Sandeenf7664b32018-05-15 13:21:48 -07001912 spin_lock(&mp->m_sb_lock);
Arnd Bergmann4bb8b652018-06-05 19:42:45 -07001913 strncpy(label, sbp->sb_fname, XFSLABEL_MAX);
Eric Sandeenf7664b32018-05-15 13:21:48 -07001914 spin_unlock(&mp->m_sb_lock);
1915
Arnd Bergmann4bb8b652018-06-05 19:42:45 -07001916 if (copy_to_user(user_label, label, sizeof(label)))
Eric Sandeenf7664b32018-05-15 13:21:48 -07001917 return -EFAULT;
1918 return 0;
1919}
1920
1921static int
1922xfs_ioc_setlabel(
1923 struct file *filp,
1924 struct xfs_mount *mp,
1925 char __user *newlabel)
1926{
1927 struct xfs_sb *sbp = &mp->m_sb;
1928 char label[XFSLABEL_MAX + 1];
1929 size_t len;
1930 int error;
1931
1932 if (!capable(CAP_SYS_ADMIN))
1933 return -EPERM;
1934 /*
 1935 * The generic ioctl allows up to FSLABEL_MAX chars, but the XFS limit is
 1936 * much smaller, at 12 bytes. We copy one more to be sure we find the
 1937 * (required) NUL character to test the incoming label length.
 1938 * NB: The on-disk label doesn't need to be NUL-terminated.
1939 */
1940 if (copy_from_user(label, newlabel, XFSLABEL_MAX + 1))
1941 return -EFAULT;
1942 len = strnlen(label, XFSLABEL_MAX + 1);
1943 if (len > sizeof(sbp->sb_fname))
1944 return -EINVAL;
1945
1946 error = mnt_want_write_file(filp);
1947 if (error)
1948 return error;
1949
1950 spin_lock(&mp->m_sb_lock);
1951 memset(sbp->sb_fname, 0, sizeof(sbp->sb_fname));
Arnd Bergmann4bb8b652018-06-05 19:42:45 -07001952 memcpy(sbp->sb_fname, label, len);
Eric Sandeenf7664b32018-05-15 13:21:48 -07001953 spin_unlock(&mp->m_sb_lock);
1954
1955 /*
1956 * Now we do several things to satisfy userspace.
1957 * In addition to normal logging of the primary superblock, we also
1958 * immediately write these changes to sector zero for the primary, then
1959 * update all backup supers (as xfs_db does for a label change), then
1960 * invalidate the block device page cache. This is so that any prior
 1961 * buffered reads from userspace (e.g. from blkid) are invalidated,
1962 * and userspace will see the newly-written label.
1963 */
1964 error = xfs_sync_sb_buf(mp);
1965 if (error)
1966 goto out;
1967 /*
1968 * growfs also updates backup supers so lock against that.
1969 */
1970 mutex_lock(&mp->m_growlock);
1971 error = xfs_update_secondary_sbs(mp);
1972 mutex_unlock(&mp->m_growlock);
1973
1974 invalidate_bdev(mp->m_ddev_targp->bt_bdev);
1975
1976out:
1977 mnt_drop_write_file(filp);
1978 return error;
1979}
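/*
 * Illustrative userspace sketch (not part of the original source): the label
 * ioctls use the generic FS_IOC_GETFSLABEL/FS_IOC_SETFSLABEL interface from
 * <linux/fs.h>; on XFS the label itself is limited to 12 characters. The
 * helper name is made up for the example.
 */
#if 0
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int print_label(int fd)
{
	char label[FSLABEL_MAX];

	if (ioctl(fd, FS_IOC_GETFSLABEL, label) < 0)
		return -1;
	printf("label: %s\n", label);	/* returned NUL-terminated */
	return 0;
}
#endif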
1980
Christoph Hellwig4d4be482008-12-09 04:47:33 -05001981/*
 1982 * Note: some of the ioctls return positive numbers as a
1983 * byte count indicating success, such as readlink_by_handle.
1984 * So we don't "sign flip" like most other routines. This means
1985 * true errors need to be returned as a negative value.
1986 */
1987long
1988xfs_file_ioctl(
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001989 struct file *filp,
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001990 unsigned int cmd,
Christoph Hellwig4d4be482008-12-09 04:47:33 -05001991 unsigned long p)
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001992{
Al Viro496ad9a2013-01-23 17:07:38 -05001993 struct inode *inode = file_inode(filp);
Christoph Hellwig4d4be482008-12-09 04:47:33 -05001994 struct xfs_inode *ip = XFS_I(inode);
1995 struct xfs_mount *mp = ip->i_mount;
1996 void __user *arg = (void __user *)p;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10001997 int error;
1998
Christoph Hellwigcca28fb2010-06-24 11:57:09 +10001999 trace_xfs_file_ioctl(ip);
Christoph Hellwig4d4be482008-12-09 04:47:33 -05002000
2001 switch (cmd) {
Christoph Hellwiga46db602011-01-07 13:02:04 +00002002 case FITRIM:
2003 return xfs_ioc_trim(mp, arg);
Eric Sandeenf7664b32018-05-15 13:21:48 -07002004 case FS_IOC_GETFSLABEL:
2005 return xfs_ioc_getlabel(mp, arg);
2006 case FS_IOC_SETFSLABEL:
2007 return xfs_ioc_setlabel(filp, mp, arg);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002008 case XFS_IOC_ALLOCSP:
2009 case XFS_IOC_FREESP:
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002010 case XFS_IOC_ALLOCSP64:
Christoph Hellwig837a6e72019-10-24 22:26:02 -07002011 case XFS_IOC_FREESP64: {
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002012 xfs_flock64_t bf;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002013
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002014 if (copy_from_user(&bf, arg, sizeof(bf)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002015 return -EFAULT;
Christoph Hellwig837a6e72019-10-24 22:26:02 -07002016 return xfs_ioc_space(filp, &bf);
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002017 }
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002018 case XFS_IOC_DIOINFO: {
Christoph Hellwigc7d68312019-10-24 22:25:39 -07002019 struct xfs_buftarg *target = xfs_inode_buftarg(ip);
2020 struct dioattr da;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002021
Eric Sandeen7c71ee72014-01-21 16:46:23 -06002022 da.d_mem = da.d_miniosz = target->bt_logical_sectorsize;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002023 da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
2024
2025 if (copy_to_user(arg, &da, sizeof(da)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002026 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002027 return 0;
2028 }
2029
2030 case XFS_IOC_FSBULKSTAT_SINGLE:
2031 case XFS_IOC_FSBULKSTAT:
2032 case XFS_IOC_FSINUMBERS:
Darrick J. Wong8bfe9d12019-07-03 20:36:26 -07002033 return xfs_ioc_fsbulkstat(mp, cmd, arg);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002034
Darrick J. Wong0448b6f2019-07-03 20:36:27 -07002035 case XFS_IOC_BULKSTAT:
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002036 return xfs_ioc_bulkstat(mp, cmd, arg);
Darrick J. Wongfba97602019-07-03 20:36:28 -07002037 case XFS_IOC_INUMBERS:
2038 return xfs_ioc_inumbers(mp, cmd, arg);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002039
2040 case XFS_IOC_FSGEOMETRY_V1:
Dave Chinner1b6d9682019-04-12 07:41:16 -07002041 return xfs_ioc_fsgeometry(mp, arg, 3);
2042 case XFS_IOC_FSGEOMETRY_V4:
2043 return xfs_ioc_fsgeometry(mp, arg, 4);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002044 case XFS_IOC_FSGEOMETRY:
Dave Chinner1b6d9682019-04-12 07:41:16 -07002045 return xfs_ioc_fsgeometry(mp, arg, 5);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002046
Darrick J. Wong7cd50062019-04-12 07:41:17 -07002047 case XFS_IOC_AG_GEOMETRY:
2048 return xfs_ioc_ag_geometry(mp, arg);
2049
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002050 case XFS_IOC_GETVERSION:
2051 return put_user(inode->i_generation, (int __user *)arg);
2052
2053 case XFS_IOC_FSGETXATTR:
2054 return xfs_ioc_fsgetxattr(ip, 0, arg);
2055 case XFS_IOC_FSGETXATTRA:
2056 return xfs_ioc_fsgetxattr(ip, 1, arg);
Lachlan McIlroy3b2816b2008-04-18 12:43:35 +10002057 case XFS_IOC_FSSETXATTR:
Lachlan McIlroy65e67f52008-04-18 12:59:45 +10002058 return xfs_ioc_fssetxattr(ip, filp, arg);
2059 case XFS_IOC_GETXFLAGS:
2060 return xfs_ioc_getxflags(ip, arg);
2061 case XFS_IOC_SETXFLAGS:
2062 return xfs_ioc_setxflags(ip, filp, arg);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002063
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002064 case XFS_IOC_GETBMAP:
2065 case XFS_IOC_GETBMAPA:
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002066 case XFS_IOC_GETBMAPX:
Christoph Hellwig232b51942017-10-17 14:16:19 -07002067 return xfs_ioc_getbmap(filp, cmd, arg);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002068
Darrick J. Wonge89c0412017-03-28 14:56:37 -07002069 case FS_IOC_GETFSMAP:
2070 return xfs_ioc_getfsmap(ip, arg);
2071
Darrick J. Wong36fd6e82017-10-17 21:37:34 -07002072 case XFS_IOC_SCRUB_METADATA:
2073 return xfs_ioc_scrub_metadata(ip, arg);
2074
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002075 case XFS_IOC_FD_TO_HANDLE:
2076 case XFS_IOC_PATH_TO_HANDLE:
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002077 case XFS_IOC_PATH_TO_FSHANDLE: {
2078 xfs_fsop_handlereq_t hreq;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002079
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002080 if (copy_from_user(&hreq, arg, sizeof(hreq)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002081 return -EFAULT;
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002082 return xfs_find_handle(cmd, &hreq);
2083 }
2084 case XFS_IOC_OPEN_BY_HANDLE: {
2085 xfs_fsop_handlereq_t hreq;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002086
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002087 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002088 return -EFAULT;
Christoph Hellwigd296d302009-01-19 02:02:57 +01002089 return xfs_open_by_handle(filp, &hreq);
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002090 }
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002091
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002092 case XFS_IOC_READLINK_BY_HANDLE: {
2093 xfs_fsop_handlereq_t hreq;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002094
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002095 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002096 return -EFAULT;
Christoph Hellwigd296d302009-01-19 02:02:57 +01002097 return xfs_readlink_by_handle(filp, &hreq);
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002098 }
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002099 case XFS_IOC_ATTRLIST_BY_HANDLE:
Christoph Hellwigd296d302009-01-19 02:02:57 +01002100 return xfs_attrlist_by_handle(filp, arg);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002101
2102 case XFS_IOC_ATTRMULTI_BY_HANDLE:
Christoph Hellwigd296d302009-01-19 02:02:57 +01002103 return xfs_attrmulti_by_handle(filp, arg);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002104
2105 case XFS_IOC_SWAPEXT: {
sandeen@sandeen.net743bb4652008-11-25 21:20:06 -06002106 struct xfs_swapext sxp;
2107
2108 if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002109 return -EFAULT;
Jan Karad9457dc2012-06-12 16:20:39 +02002110 error = mnt_want_write_file(filp);
2111 if (error)
2112 return error;
Dave Chinnera133d952013-08-12 20:49:48 +10002113 error = xfs_ioc_swapext(&sxp);
Jan Karad9457dc2012-06-12 16:20:39 +02002114 mnt_drop_write_file(filp);
Dave Chinner24513372014-06-25 14:58:08 +10002115 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002116 }
2117
2118 case XFS_IOC_FSCOUNTS: {
2119 xfs_fsop_counts_t out;
2120
Eric Sandeen91083262019-05-01 20:26:30 -07002121 xfs_fs_counts(mp, &out);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002122
2123 if (copy_to_user(arg, &out, sizeof(out)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002124 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002125 return 0;
2126 }
2127
2128 case XFS_IOC_SET_RESBLKS: {
2129 xfs_fsop_resblks_t inout;
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07002130 uint64_t in;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002131
2132 if (!capable(CAP_SYS_ADMIN))
2133 return -EPERM;
2134
Eric Sandeend5db0f92010-02-05 22:59:53 +00002135 if (mp->m_flags & XFS_MOUNT_RDONLY)
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002136 return -EROFS;
Eric Sandeend5db0f92010-02-05 22:59:53 +00002137
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002138 if (copy_from_user(&inout, arg, sizeof(inout)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002139 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002140
Jan Karad9457dc2012-06-12 16:20:39 +02002141 error = mnt_want_write_file(filp);
2142 if (error)
2143 return error;
2144
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002145 /* input parameter is passed in resblks field of structure */
2146 in = inout.resblks;
2147 error = xfs_reserve_blocks(mp, &in, &inout);
Jan Karad9457dc2012-06-12 16:20:39 +02002148 mnt_drop_write_file(filp);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002149 if (error)
Dave Chinner24513372014-06-25 14:58:08 +10002150 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002151
2152 if (copy_to_user(arg, &inout, sizeof(inout)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002153 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002154 return 0;
2155 }
2156
2157 case XFS_IOC_GET_RESBLKS: {
2158 xfs_fsop_resblks_t out;
2159
2160 if (!capable(CAP_SYS_ADMIN))
2161 return -EPERM;
2162
2163 error = xfs_reserve_blocks(mp, NULL, &out);
2164 if (error)
Dave Chinner24513372014-06-25 14:58:08 +10002165 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002166
2167 if (copy_to_user(arg, &out, sizeof(out)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002168 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002169
2170 return 0;
2171 }
2172
2173 case XFS_IOC_FSGROWFSDATA: {
2174 xfs_growfs_data_t in;
2175
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002176 if (copy_from_user(&in, arg, sizeof(in)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002177 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002178
Jan Karad9457dc2012-06-12 16:20:39 +02002179 error = mnt_want_write_file(filp);
2180 if (error)
2181 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002182 error = xfs_growfs_data(mp, &in);
Jan Karad9457dc2012-06-12 16:20:39 +02002183 mnt_drop_write_file(filp);
Dave Chinner24513372014-06-25 14:58:08 +10002184 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002185 }
2186
2187 case XFS_IOC_FSGROWFSLOG: {
2188 xfs_growfs_log_t in;
2189
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002190 if (copy_from_user(&in, arg, sizeof(in)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002191 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002192
Jan Karad9457dc2012-06-12 16:20:39 +02002193 error = mnt_want_write_file(filp);
2194 if (error)
2195 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002196 error = xfs_growfs_log(mp, &in);
Jan Karad9457dc2012-06-12 16:20:39 +02002197 mnt_drop_write_file(filp);
Dave Chinner24513372014-06-25 14:58:08 +10002198 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002199 }
2200
2201 case XFS_IOC_FSGROWFSRT: {
2202 xfs_growfs_rt_t in;
2203
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002204 if (copy_from_user(&in, arg, sizeof(in)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002205 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002206
Jan Karad9457dc2012-06-12 16:20:39 +02002207 error = mnt_want_write_file(filp);
2208 if (error)
2209 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002210 error = xfs_growfs_rt(mp, &in);
Jan Karad9457dc2012-06-12 16:20:39 +02002211 mnt_drop_write_file(filp);
Dave Chinner24513372014-06-25 14:58:08 +10002212 return error;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002213 }
2214
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002215 case XFS_IOC_GOINGDOWN: {
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07002216 uint32_t in;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002217
2218 if (!capable(CAP_SYS_ADMIN))
2219 return -EPERM;
2220
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07002221 if (get_user(in, (uint32_t __user *)arg))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002222 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002223
Dave Chinner24513372014-06-25 14:58:08 +10002224 return xfs_fs_goingdown(mp, in);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002225 }
2226
2227 case XFS_IOC_ERROR_INJECTION: {
2228 xfs_error_injection_t in;
2229
2230 if (!capable(CAP_SYS_ADMIN))
2231 return -EPERM;
2232
2233 if (copy_from_user(&in, arg, sizeof(in)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002234 return -EFAULT;
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002235
Darrick J. Wong31965ef2017-06-20 17:54:46 -07002236 return xfs_errortag_add(mp, in.errtag);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002237 }
2238
2239 case XFS_IOC_ERROR_CLEARALL:
2240 if (!capable(CAP_SYS_ADMIN))
2241 return -EPERM;
2242
Darrick J. Wong31965ef2017-06-20 17:54:46 -07002243 return xfs_errortag_clearall(mp);
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002244
Brian Foster8ca149d2012-11-07 12:21:12 -05002245 case XFS_IOC_FREE_EOFBLOCKS: {
Dwight Engenb9fe5052013-08-15 14:08:02 -04002246 struct xfs_fs_eofblocks eofb;
2247 struct xfs_eofblocks keofb;
Brian Foster8ca149d2012-11-07 12:21:12 -05002248
Dwight Engen8c567a72013-08-15 14:08:03 -04002249 if (!capable(CAP_SYS_ADMIN))
2250 return -EPERM;
2251
2252 if (mp->m_flags & XFS_MOUNT_RDONLY)
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002253 return -EROFS;
Dwight Engen8c567a72013-08-15 14:08:03 -04002254
Brian Foster8ca149d2012-11-07 12:21:12 -05002255 if (copy_from_user(&eofb, arg, sizeof(eofb)))
Eric Sandeenb474c7a2014-06-22 15:04:54 +10002256 return -EFAULT;
Brian Foster8ca149d2012-11-07 12:21:12 -05002257
Dwight Engenb9fe5052013-08-15 14:08:02 -04002258 error = xfs_fs_eofblocks_from_user(&eofb, &keofb);
2259 if (error)
Dave Chinner24513372014-06-25 14:58:08 +10002260 return error;
Brian Foster8ca149d2012-11-07 12:21:12 -05002261
Dave Chinner24513372014-06-25 14:58:08 +10002262 return xfs_icache_free_eofblocks(mp, &keofb);
Brian Foster8ca149d2012-11-07 12:21:12 -05002263 }
2264
Lachlan McIlroydf26cfe2008-04-18 11:44:03 +10002265 default:
2266 return -ENOTTY;
2267 }
2268}