// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Christoph Hellwig.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_iomap.h"
#include "xfs_pnfs.h"

/*
 * Ensure that we do not have any outstanding pNFS layouts that can be used by
 * clients to directly read from or write to this inode. This must be called
 * before every operation that can remove blocks from the extent map.
 * Additionally we call it during the write operation, where we aren't
 * concerned about exposing unallocated blocks but just want to provide basic
 * synchronization between a local writer and pNFS clients. mmap writes would
 * also benefit from this sort of synchronization, but due to the tricky
 * locking rules in the page fault path we don't bother.
 */
int
xfs_break_leased_layouts(
	struct inode		*inode,
	uint			*iolock,
	bool			*did_unlock)
{
	struct xfs_inode	*ip = XFS_I(inode);
	int			error;

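	/*
	 * A non-blocking break_layout() tells us whether a pNFS layout is
	 * still outstanding.  If it is, drop the iolock so the layout can be
	 * recalled without deadlocking, wait for the recall to finish, then
	 * retake the iolock exclusively and retry in case a new layout was
	 * handed out while we were unlocked.
	 */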
	while ((error = break_layout(inode, false)) == -EWOULDBLOCK) {
		xfs_iunlock(ip, *iolock);
		*did_unlock = true;
		error = break_layout(inode, true);
		*iolock &= ~XFS_IOLOCK_SHARED;
		*iolock |= XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
	}

	return error;
}

/*
 * Get a unique ID including its location so that the client can identify
 * the exported device.
 */
int
xfs_fs_get_uuid(
	struct super_block	*sb,
	u8			*buf,
	u32			*len,
	u64			*offset)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_notice_once(mp,
		"Using experimental pNFS feature, use at your own risk!");

	if (*len < sizeof(uuid_t))
		return -EINVAL;

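	/*
	 * Hand back the superblock UUID and its byte offset within the
	 * on-disk superblock so the client can locate it on the device.
	 */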
	memcpy(buf, &mp->m_sb.sb_uuid, sizeof(uuid_t));
	*len = sizeof(uuid_t);
	*offset = offsetof(struct xfs_dsb, sb_uuid);
	return 0;
}

/*
 * We cannot use file based VFS helpers such as file_modified() to update
 * inode state as we modify the data/metadata in the inode here. Hence we have
 * to open code the timestamp updates and SUID/SGID stripping. We also need
 * to set the inode prealloc flag to ensure that the extents we allocate are
 * not removed if the inode is reclaimed from memory before
 * xfs_fs_commit_blocks() is called from the client to indicate that data has
 * been written and the file size can be extended.
 */
static int
xfs_fs_map_update_inode(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

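	/* Strip setuid, and setgid if the file is group-executable. */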
	VFS_I(ip)->i_mode &= ~S_ISUID;
	if (VFS_I(ip)->i_mode & S_IXGRP)
		VFS_I(ip)->i_mode &= ~S_ISGID;
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	ip->i_diflags |= XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	return xfs_trans_commit(tp);
}

/*
 * Get a layout for the pNFS client.
 */
int
xfs_fs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	u64			length,
	struct iomap		*iomap,
	bool			write,
	u32			*device_generation)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb, end_fsb;
	loff_t			limit;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;
	uint			lock_flags;
	int			error = 0;

	if (xfs_is_shutdown(mp))
		return -EIO;

	/*
	 * We can't export inodes residing on the realtime device. The realtime
	 * device doesn't have a UUID to identify it, so the client has no way
	 * to find it.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		return -ENXIO;

	/*
	 * The pNFS block layout spec actually supports reflink-like
	 * functionality, but the Linux pNFS server doesn't implement it yet.
	 */
	if (xfs_is_reflink_inode(ip))
		return -ENXIO;

	/*
	 * Lock out any other I/O before we flush and invalidate the pagecache,
	 * and then hand out a layout to the remote system.  This is very
	 * similar to direct I/O, except that the synchronization is much more
	 * complicated.  See the comment near xfs_break_leased_layouts
	 * for a detailed explanation.
	 */
	xfs_ilock(ip, XFS_IOLOCK_EXCL);

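	/*
	 * Reject requests that start beyond the supported offset limit and
	 * trim the length so the mapping does not extend past it.  For reads
	 * the limit also covers EOF rounded up to a filesystem block.
	 */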
	error = -EINVAL;
	limit = mp->m_super->s_maxbytes;
	if (!write)
		limit = max(limit, round_up(i_size_read(inode),
				     inode->i_sb->s_blocksize));
	if (offset > limit)
		goto out_unlock;
	if (offset > limit - length)
		length = limit - offset;

	error = filemap_write_and_wait(inode->i_mapping);
	if (error)
		goto out_unlock;
	error = invalidate_inode_pages2(inode->i_mapping);
	if (WARN_ON_ONCE(error))
		goto out_unlock;

	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	lock_flags = xfs_ilock_data_map_shared(ip);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, bmapi_flags);

	ASSERT(!nimaps || imap.br_startblock != DELAYSTARTBLOCK);

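	/*
	 * For a write without a usable mapping (no extent found, or the range
	 * starts in a hole) allocate real blocks now so the client has stable
	 * space to write into.
	 */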
	if (!error && write &&
	    (!nimaps || imap.br_startblock == HOLESTARTBLOCK)) {
		if (offset + length > XFS_ISIZE(ip))
			end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
		else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
			end_fsb = min(end_fsb, imap.br_startoff +
					       imap.br_blockcount);
		xfs_iunlock(ip, lock_flags);

		error = xfs_iomap_write_direct(ip, offset_fsb,
				end_fsb - offset_fsb, 0, &imap);
		if (error)
			goto out_unlock;

		/*
		 * Ensure the next transaction is committed synchronously so
		 * that the blocks allocated and handed out to the client are
		 * guaranteed to be present even after a server crash.
		 */
		error = xfs_fs_map_update_inode(ip);
		if (!error)
			error = xfs_log_force_inode(ip);
		if (error)
			goto out_unlock;

	} else {
		xfs_iunlock(ip, lock_flags);
	}
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0, 0);
	*device_generation = mp->m_generation;
	return error;
out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}

/*
 * Ensure the size update falls into a valid allocated block.
 */
static int
xfs_pnfs_validate_isize(
	struct xfs_inode	*ip,
	xfs_off_t		isize)
{
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	int			error = 0;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, XFS_B_TO_FSBT(ip->i_mount, isize - 1), 1,
				&imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	if (error)
		return error;

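	/* The byte before the new EOF must sit in a written, allocated extent. */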
	if (imap.br_startblock == HOLESTARTBLOCK ||
	    imap.br_startblock == DELAYSTARTBLOCK ||
	    imap.br_state == XFS_EXT_UNWRITTEN)
		return -EIO;
	return 0;
}

/*
 * Make sure the blocks described by maps are stable on disk.  This includes
 * converting any unwritten extents, flushing the disk cache and updating the
 * time stamps.
 *
 * Note that we rely on the caller to always send us a timestamp update so that
 * we always commit a transaction here.  If that stops being true we will have
 * to manually flush the cache here similar to what the fsync code path does
 * for datasyncs on files that have no dirty metadata.
 */
int
xfs_fs_commit_blocks(
	struct inode		*inode,
	struct iomap		*maps,
	int			nr_maps,
	struct iattr		*iattr)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	bool			update_isize = false;
	int			error, i;
	loff_t			size;

	ASSERT(iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME));

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	size = i_size_read(inode);
	if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size > size) {
		update_isize = true;
		size = iattr->ia_size;
	}

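	/*
	 * Walk the maps the client committed, trimming each to the (possibly
	 * extended) file size, dropping cached pages over the range and
	 * converting any unwritten extents covering the written data.
	 */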
	for (i = 0; i < nr_maps; i++) {
		u64 start, length, end;

		start = maps[i].offset;
		if (start > size)
			continue;

		end = start + maps[i].length;
		if (end > size)
			end = size;

		length = end - start;
		if (!length)
			continue;

		/*
		 * Make sure reads through the pagecache see the new data.
		 */
		error = invalidate_inode_pages2_range(inode->i_mapping,
					start >> PAGE_SHIFT,
					(end - 1) >> PAGE_SHIFT);
		WARN_ON_ONCE(error);

		error = xfs_iomap_write_unwritten(ip, start, length, false);
		if (error)
			goto out_drop_iolock;
	}

	if (update_isize) {
		error = xfs_pnfs_validate_isize(ip, size);
		if (error)
			goto out_drop_iolock;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_drop_iolock;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	xfs_setattr_time(ip, iattr);
	if (update_isize) {
		i_size_write(inode, iattr->ia_size);
		ip->i_disk_size = iattr->ia_size;
	}

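	/*
	 * Commit synchronously so the timestamp and size update are stable
	 * on disk before we reply to the client.
	 */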
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);

out_drop_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}