// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2004-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include <linux/mount.h>
#include <linux/fsmap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_iwalk.h"
#include "xfs_itable.h"
#include "xfs_fsops.h"
#include "xfs_rtalloc.h"
#include "xfs_attr.h"
#include "xfs_ioctl.h"
#include "xfs_ioctl32.h"
#include "xfs_trace.h"
#include "xfs_sb.h"

#define _NATIVE_IOC(cmd, type) \
	_IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))

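/*
 * On i386 (BROKEN_X86_ALIGNMENT), 64-bit fields are only 32-bit aligned, so
 * several of the compat structures have a different layout from their native
 * counterparts and must be copied field by field by the helpers below.  Where
 * the layouts match, the native formatters are used directly (see the #else
 * branch at the end of this section).
 */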
#ifdef BROKEN_X86_ALIGNMENT
STATIC int
xfs_compat_ioc_fsgeometry_v1(
	struct xfs_mount	  *mp,
	compat_xfs_fsop_geom_v1_t __user *arg32)
{
	struct xfs_fsop_geom	  fsgeo;

	xfs_fs_geometry(mp, &fsgeo, 3);
	/* The 32-bit variant simply has some padding at the end */
	if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1)))
		return -EFAULT;
	return 0;
}

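/*
 * Copy a 32-bit growfs data request into the native structure field by
 * field; the 64-bit newblocks member is packed differently on i386.
 */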
STATIC int
xfs_compat_growfs_data_copyin(
	struct xfs_growfs_data	 *in,
	compat_xfs_growfs_data_t __user *arg32)
{
	if (get_user(in->newblocks, &arg32->newblocks) ||
	    get_user(in->imaxpct, &arg32->imaxpct))
		return -EFAULT;
	return 0;
}

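/* Same field-by-field treatment for a 32-bit growfs realtime request. */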
STATIC int
xfs_compat_growfs_rt_copyin(
	struct xfs_growfs_rt	 *in,
	compat_xfs_growfs_rt_t	__user *arg32)
{
	if (get_user(in->newblocks, &arg32->newblocks) ||
	    get_user(in->extsize, &arg32->extsize))
		return -EFAULT;
	return 0;
}

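/*
 * Format an inumbers record into the 32-bit inogrp layout expected by the
 * caller's buffer and advance the bulk request cursor past it.
 */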
STATIC int
xfs_fsinumbers_fmt_compat(
	struct xfs_ibulk		*breq,
	const struct xfs_inumbers	*ig)
{
	struct compat_xfs_inogrp __user	*p32 = breq->ubuffer;
	struct xfs_inogrp		ig1;
	struct xfs_inogrp		*igrp = &ig1;

	xfs_inumbers_to_inogrp(&ig1, ig);

	if (put_user(igrp->xi_startino, &p32->xi_startino) ||
	    put_user(igrp->xi_alloccount, &p32->xi_alloccount) ||
	    put_user(igrp->xi_allocmask, &p32->xi_allocmask))
		return -EFAULT;

	return xfs_ibulk_advance(breq, sizeof(struct compat_xfs_inogrp));
}

#else
#define xfs_fsinumbers_fmt_compat xfs_fsinumbers_fmt
#endif	/* BROKEN_X86_ALIGNMENT */

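/*
 * Copy in a 32-bit xfs_bstime; tv_sec is only 32 bits wide in the compat
 * layout, so widen it into the native type.
 */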
STATIC int
xfs_ioctl32_bstime_copyin(
	xfs_bstime_t		*bstime,
	compat_xfs_bstime_t	__user *bstime32)
{
	old_time32_t		sec32;	/* tv_sec differs on 64 vs. 32 */

	if (get_user(sec32, &bstime32->tv_sec) ||
	    get_user(bstime->tv_nsec, &bstime32->tv_nsec))
		return -EFAULT;
	bstime->tv_sec = sec32;
	return 0;
}

/*
 * struct xfs_bstat has different alignment on Intel, and the size of
 * bstime_t differs everywhere, so copy the structure in field by field.
 */
STATIC int
xfs_ioctl32_bstat_copyin(
	struct xfs_bstat		*bstat,
	struct compat_xfs_bstat	__user	*bstat32)
{
	if (get_user(bstat->bs_ino, &bstat32->bs_ino) ||
	    get_user(bstat->bs_mode, &bstat32->bs_mode) ||
	    get_user(bstat->bs_nlink, &bstat32->bs_nlink) ||
	    get_user(bstat->bs_uid, &bstat32->bs_uid) ||
	    get_user(bstat->bs_gid, &bstat32->bs_gid) ||
	    get_user(bstat->bs_rdev, &bstat32->bs_rdev) ||
	    get_user(bstat->bs_blksize, &bstat32->bs_blksize) ||
	    get_user(bstat->bs_size, &bstat32->bs_size) ||
	    xfs_ioctl32_bstime_copyin(&bstat->bs_atime, &bstat32->bs_atime) ||
	    xfs_ioctl32_bstime_copyin(&bstat->bs_mtime, &bstat32->bs_mtime) ||
	    xfs_ioctl32_bstime_copyin(&bstat->bs_ctime, &bstat32->bs_ctime) ||
	    get_user(bstat->bs_blocks, &bstat32->bs_blocks) ||
	    get_user(bstat->bs_xflags, &bstat32->bs_xflags) ||
	    get_user(bstat->bs_extsize, &bstat32->bs_extsize) ||
	    get_user(bstat->bs_extents, &bstat32->bs_extents) ||
	    get_user(bstat->bs_gen, &bstat32->bs_gen) ||
	    get_user(bstat->bs_projid_lo, &bstat32->bs_projid_lo) ||
	    get_user(bstat->bs_projid_hi, &bstat32->bs_projid_hi) ||
	    get_user(bstat->bs_forkoff, &bstat32->bs_forkoff) ||
	    get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) ||
	    get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) ||
	    get_user(bstat->bs_aextents, &bstat32->bs_aextents))
		return -EFAULT;
	return 0;
}

/* XFS_IOC_FSBULKSTAT and friends */

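/*
 * Store an xfs_bstime in the 32-bit layout; tv_sec is narrowed to 32 bits
 * on the way out.
 */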
STATIC int
xfs_bstime_store_compat(
	compat_xfs_bstime_t	__user *p32,
	const xfs_bstime_t	*p)
{
	__s32			sec32;

	sec32 = p->tv_sec;
	if (put_user(sec32, &p32->tv_sec) ||
	    put_user(p->tv_nsec, &p32->tv_nsec))
		return -EFAULT;
	return 0;
}

/* Return 0 on success or negative error (to xfs_bulkstat()) */
STATIC int
xfs_fsbulkstat_one_fmt_compat(
	struct xfs_ibulk		*breq,
	const struct xfs_bulkstat	*bstat)
{
	struct compat_xfs_bstat	__user	*p32 = breq->ubuffer;
	struct xfs_bstat		bs1;
	struct xfs_bstat		*buffer = &bs1;

	xfs_bulkstat_to_bstat(breq->mp, &bs1, bstat);

	if (put_user(buffer->bs_ino, &p32->bs_ino) ||
	    put_user(buffer->bs_mode, &p32->bs_mode) ||
	    put_user(buffer->bs_nlink, &p32->bs_nlink) ||
	    put_user(buffer->bs_uid, &p32->bs_uid) ||
	    put_user(buffer->bs_gid, &p32->bs_gid) ||
	    put_user(buffer->bs_rdev, &p32->bs_rdev) ||
	    put_user(buffer->bs_blksize, &p32->bs_blksize) ||
	    put_user(buffer->bs_size, &p32->bs_size) ||
	    xfs_bstime_store_compat(&p32->bs_atime, &buffer->bs_atime) ||
	    xfs_bstime_store_compat(&p32->bs_mtime, &buffer->bs_mtime) ||
	    xfs_bstime_store_compat(&p32->bs_ctime, &buffer->bs_ctime) ||
	    put_user(buffer->bs_blocks, &p32->bs_blocks) ||
	    put_user(buffer->bs_xflags, &p32->bs_xflags) ||
	    put_user(buffer->bs_extsize, &p32->bs_extsize) ||
	    put_user(buffer->bs_extents, &p32->bs_extents) ||
	    put_user(buffer->bs_gen, &p32->bs_gen) ||
	    put_user(buffer->bs_projid, &p32->bs_projid) ||
	    put_user(buffer->bs_projid_hi, &p32->bs_projid_hi) ||
	    put_user(buffer->bs_forkoff, &p32->bs_forkoff) ||
	    put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) ||
	    put_user(buffer->bs_dmstate, &p32->bs_dmstate) ||
	    put_user(buffer->bs_aextents, &p32->bs_aextents))
		return -EFAULT;

	return xfs_ibulk_advance(breq, sizeof(struct compat_xfs_bstat));
}

/* copied from xfs_ioctl.c */
STATIC int
xfs_compat_ioc_fsbulkstat(
	struct file		*file,
	unsigned int		cmd,
	struct compat_xfs_fsop_bulkreq __user *p32)
{
	struct xfs_mount	*mp = XFS_I(file_inode(file))->i_mount;
	u32			addr;
	struct xfs_fsop_bulkreq	bulkreq;
	struct xfs_ibulk	breq = {
		.mp		= mp,
		.mnt_userns	= file_mnt_user_ns(file),
		.ocount		= 0,
	};
	xfs_ino_t		lastino;
	int			error;

	/*
	 * Output structure handling functions.  Depending on the command,
	 * either the xfs_bstat or the xfs_inogrp structure is written out
	 * to userspace memory via bulkreq.ubuffer.  Normally the compat
	 * functions and structure size are the correct ones to use ...
	 */
	inumbers_fmt_pf		inumbers_func = xfs_fsinumbers_fmt_compat;
	bulkstat_one_fmt_pf	bs_one_func = xfs_fsbulkstat_one_fmt_compat;

#ifdef CONFIG_X86_X32
	if (in_x32_syscall()) {
		/*
		 * ... but on x32 the input xfs_fsop_bulkreq has pointers
		 * which must be handled in the "compat" (32-bit) way, while
		 * the xfs_bstat and xfs_inogrp structures follow native 64-
		 * bit layout convention.  So adjust accordingly, otherwise
		 * the data written out in compat layout will not match what
		 * x32 userspace expects.
		 */
		inumbers_func = xfs_fsinumbers_fmt;
		bs_one_func = xfs_fsbulkstat_one_fmt;
	}
#endif

	/* done = 1 if there are more stats to get and if bulkstat */
	/* should be called again (unused here, but used in dmapi) */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (get_user(addr, &p32->lastip))
		return -EFAULT;
	bulkreq.lastip = compat_ptr(addr);
	if (get_user(bulkreq.icount, &p32->icount) ||
	    get_user(addr, &p32->ubuffer))
		return -EFAULT;
	bulkreq.ubuffer = compat_ptr(addr);
	if (get_user(addr, &p32->ocount))
		return -EFAULT;
	bulkreq.ocount = compat_ptr(addr);

	if (copy_from_user(&lastino, bulkreq.lastip, sizeof(__s64)))
		return -EFAULT;

	if (bulkreq.icount <= 0)
		return -EINVAL;

	if (bulkreq.ubuffer == NULL)
		return -EINVAL;

	breq.ubuffer = bulkreq.ubuffer;
	breq.icount = bulkreq.icount;

	/*
	 * FSBULKSTAT_SINGLE expects that *lastip contains the inode number
	 * that we want to stat.  However, FSINUMBERS and FSBULKSTAT expect
	 * that *lastip contains either zero or the number of the last inode
	 * to be examined by the previous call and return results starting
	 * with the next inode after that.  The new bulk request back end
	 * functions take the inode to start with, so we have to compute the
	 * startino parameter from lastino to maintain correct function.
	 * lastino == 0 is a special case because it has traditionally meant
	 * "first inode in filesystem".
	 */
	if (cmd == XFS_IOC_FSINUMBERS_32) {
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_inumbers(&breq, inumbers_func);
		lastino = breq.startino - 1;
	} else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) {
		breq.startino = lastino;
		breq.icount = 1;
		error = xfs_bulkstat_one(&breq, bs_one_func);
		lastino = breq.startino;
	} else if (cmd == XFS_IOC_FSBULKSTAT_32) {
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_bulkstat(&breq, bs_one_func);
		lastino = breq.startino - 1;
	} else {
		error = -EINVAL;
	}
	if (error)
		return error;

	if (bulkreq.lastip != NULL &&
	    copy_to_user(bulkreq.lastip, &lastino, sizeof(xfs_ino_t)))
		return -EFAULT;

	if (bulkreq.ocount != NULL &&
	    copy_to_user(bulkreq.ocount, &breq.ocount, sizeof(__s32)))
		return -EFAULT;

	return 0;
}

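/*
 * Unpack a 32-bit handle request; the embedded pointers are 32 bits wide
 * and must be converted with compat_ptr() before native code can use them.
 */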
STATIC int
xfs_compat_handlereq_copyin(
	xfs_fsop_handlereq_t		*hreq,
	compat_xfs_fsop_handlereq_t	__user *arg32)
{
	compat_xfs_fsop_handlereq_t	hreq32;

	if (copy_from_user(&hreq32, arg32, sizeof(compat_xfs_fsop_handlereq_t)))
		return -EFAULT;

	hreq->fd = hreq32.fd;
	hreq->path = compat_ptr(hreq32.path);
	hreq->oflags = hreq32.oflags;
	hreq->ihandle = compat_ptr(hreq32.ihandle);
	hreq->ihandlen = hreq32.ihandlen;
	hreq->ohandle = compat_ptr(hreq32.ohandle);
	hreq->ohandlen = compat_ptr(hreq32.ohandlen);

	return 0;
}

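/* Resolve the input handle of a compat handle request to a dentry. */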
STATIC struct dentry *
xfs_compat_handlereq_to_dentry(
	struct file		*parfilp,
	compat_xfs_fsop_handlereq_t *hreq)
{
	return xfs_handle_to_dentry(parfilp,
			compat_ptr(hreq->ihandle), hreq->ihandlen);
}

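/*
 * 32-bit XFS_IOC_ATTRLIST_BY_HANDLE: look up the handle, then hand the
 * user buffer and cursor straight to the common attr list helper.
 */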
STATIC int
xfs_compat_attrlist_by_handle(
	struct file		*parfilp,
	compat_xfs_fsop_attrlist_handlereq_t __user *p)
{
	compat_xfs_fsop_attrlist_handlereq_t al_hreq;
	struct dentry		*dentry;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&al_hreq, p, sizeof(al_hreq)))
		return -EFAULT;

	dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = xfs_ioc_attr_list(XFS_I(d_inode(dentry)),
			compat_ptr(al_hreq.buffer), al_hreq.buflen,
			al_hreq.flags, &p->pos);
	dput(dentry);
	return error;
}

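/*
 * 32-bit XFS_IOC_ATTRMULTI_BY_HANDLE: copy in the operation array (bounded
 * to 16 pages), run each op through xfs_ioc_attrmulti_one(), and copy the
 * per-op error codes back out to userspace.
 */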
STATIC int
xfs_compat_attrmulti_by_handle(
	struct file				*parfilp,
	void					__user *arg)
{
	int					error;
	compat_xfs_attr_multiop_t		*ops;
	compat_xfs_fsop_attrmulti_handlereq_t	am_hreq;
	struct dentry				*dentry;
	unsigned int				i, size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&am_hreq, arg,
			   sizeof(compat_xfs_fsop_attrmulti_handlereq_t)))
		return -EFAULT;

	/* overflow check */
	if (am_hreq.opcount >= INT_MAX / sizeof(compat_xfs_attr_multiop_t))
		return -E2BIG;

	dentry = xfs_compat_handlereq_to_dentry(parfilp, &am_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = -E2BIG;
	size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_dput;

	ops = memdup_user(compat_ptr(am_hreq.ops), size);
	if (IS_ERR(ops)) {
		error = PTR_ERR(ops);
		goto out_dput;
	}

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = xfs_ioc_attrmulti_one(parfilp,
				d_inode(dentry), ops[i].am_opcode,
				compat_ptr(ops[i].am_attrname),
				compat_ptr(ops[i].am_attrvalue),
				&ops[i].am_length, ops[i].am_flags);
	}

	if (copy_to_user(compat_ptr(am_hreq.ops), ops, size))
		error = -EFAULT;

	kfree(ops);
 out_dput:
	dput(dentry);
	return error;
}

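/*
 * Main 32-bit compat ioctl entry point.  Commands whose structures differ
 * between 32-bit and native layouts are translated here; everything else
 * falls through to the native xfs_file_ioctl().
 */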
long
xfs_file_compat_ioctl(
	struct file		*filp,
	unsigned		cmd,
	unsigned long		p)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	void			__user *arg = compat_ptr(p);
	int			error;

	trace_xfs_file_compat_ioctl(ip);

	switch (cmd) {
#if defined(BROKEN_X86_ALIGNMENT)
	case XFS_IOC_FSGEOMETRY_V1_32:
		return xfs_compat_ioc_fsgeometry_v1(ip->i_mount, arg);
	case XFS_IOC_FSGROWFSDATA_32: {
		struct xfs_growfs_data	in;

		if (xfs_compat_growfs_data_copyin(&in, arg))
			return -EFAULT;
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_data(ip->i_mount, &in);
		mnt_drop_write_file(filp);
		return error;
	}
	case XFS_IOC_FSGROWFSRT_32: {
		struct xfs_growfs_rt	in;

		if (xfs_compat_growfs_rt_copyin(&in, arg))
			return -EFAULT;
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_rt(ip->i_mount, &in);
		mnt_drop_write_file(filp);
		return error;
	}
#endif
	/* long changes size, but xfs only copies out 32 bits */
	case XFS_IOC_GETVERSION_32:
		cmd = _NATIVE_IOC(cmd, long);
		return xfs_file_ioctl(filp, cmd, p);
	case XFS_IOC_SWAPEXT_32: {
		struct xfs_swapext	  sxp;
		struct compat_xfs_swapext __user *sxu = arg;

		/* Bulk copy in up to the sx_stat field, then copy bstat */
		if (copy_from_user(&sxp, sxu,
				   offsetof(struct xfs_swapext, sx_stat)) ||
		    xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat))
			return -EFAULT;
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_ioc_swapext(&sxp);
		mnt_drop_write_file(filp);
		return error;
	}
	case XFS_IOC_FSBULKSTAT_32:
	case XFS_IOC_FSBULKSTAT_SINGLE_32:
	case XFS_IOC_FSINUMBERS_32:
		return xfs_compat_ioc_fsbulkstat(filp, cmd, arg);
	case XFS_IOC_FD_TO_HANDLE_32:
	case XFS_IOC_PATH_TO_HANDLE_32:
	case XFS_IOC_PATH_TO_FSHANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -EFAULT;
		cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq);
		return xfs_find_handle(cmd, &hreq);
	}
	case XFS_IOC_OPEN_BY_HANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -EFAULT;
		return xfs_open_by_handle(filp, &hreq);
	}
	case XFS_IOC_READLINK_BY_HANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -EFAULT;
		return xfs_readlink_by_handle(filp, &hreq);
	}
	case XFS_IOC_ATTRLIST_BY_HANDLE_32:
		return xfs_compat_attrlist_by_handle(filp, arg);
	case XFS_IOC_ATTRMULTI_BY_HANDLE_32:
		return xfs_compat_attrmulti_by_handle(filp, arg);
	default:
		/* try the native version */
		return xfs_file_ioctl(filp, cmd, (unsigned long)arg);
	}
}