// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"

#define XFS_ALLOC_ALIGN(mp, off) \
	(((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)
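
/*
 * Illustrative example for the macro above (not part of the original
 * source): with m_allocsize_log == 4, XFS_ALLOC_ALIGN(mp, 0x13) clears
 * the low four bits and yields 0x10, i.e. the file offset rounded down
 * to the mount's allocation size granularity.
 */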

static int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

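/*
 * Translate a file-offset mapping (struct xfs_bmbt_irec) into the generic
 * struct iomap consumed by the VFS iomap layer: classify it as a hole,
 * delalloc, unwritten or mapped extent, fill in the backing device, and
 * mark the mapping dirty when pinned, fsync-relevant log items exist.
 */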
int
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap,
	unsigned int		mapping_flags,
	u16			iomap_flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
		return xfs_alert_fsblock_zero(ip, imap);

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK ||
		   isnullstartblock(imap->br_startblock)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
		if (mapping_flags & IOMAP_DAX)
			iomap->addr += target->bt_dax_part_off;

		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	if (mapping_flags & IOMAP_DAX)
		iomap->dax_dev = target->bt_daxdev;
	else
		iomap->bdev = target->bt_bdev;
	iomap->flags = iomap_flags;

	if (xfs_ipincount(ip) &&
	    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		iomap->flags |= IOMAP_F_DIRTY;
	return 0;
}

static void
xfs_hole_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	iomap->addr = IOMAP_NULL_ADDR;
	iomap->type = IOMAP_HOLE;
	iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
	iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
	iomap->bdev = target->bt_bdev;
	iomap->dax_dev = target->bt_daxdev;
}

static inline xfs_fileoff_t
xfs_iomap_end_fsb(
	struct xfs_mount	*mp,
	loff_t			offset,
	loff_t			count)
{
	ASSERT(offset <= mp->m_super->s_maxbytes);
	return min(XFS_B_TO_FSB(mp, offset + count),
		   XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
}

static xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && xfs_has_swalloc(mp))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	return align;
}
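
/*
 * Illustrative example for xfs_eof_alignment() above (assumed geometry,
 * not from the original source): on a 4k-block filesystem with a
 * 16-block stripe unit (m_dalign) and a 128-block stripe width
 * (m_swidth), a file of at least 64k gets its EOF allocations aligned to
 * 16 blocks; with "-o swalloc" the alignment grows to 128 blocks and the
 * file size threshold to 512k.
 */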

/*
 * Check if last_fsb is outside the last extent, and if so grow it to the next
 * stripe unit boundary.
 */
xfs_fileoff_t
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_extlen_t		extsz = xfs_get_extsz_hint(ip);
	xfs_extlen_t		align = xfs_eof_alignment(ip);
	struct xfs_bmbt_irec	irec;
	struct xfs_iext_cursor	icur;

	ASSERT(!xfs_need_iread_extents(ifp));

	/*
	 * Always round up the allocation request to the extent hint boundary.
	 */
	if (extsz) {
		if (align)
			align = roundup_64(align, extsz);
		else
			align = extsz;
	}

	if (align) {
		xfs_fileoff_t	aligned_end_fsb = roundup_64(end_fsb, align);

		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &irec) ||
		    aligned_end_fsb >= irec.br_startoff + irec.br_blockcount)
			return aligned_end_fsb;
	}

	return end_fsb;
}

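/*
 * Illustrative example for xfs_iomap_eof_align_last_fsb() above (assumed
 * numbers): with align == 16, an end_fsb of 100 rounds up to 112.  The
 * rounded value is used only when it does not land inside the last
 * extent, so appending writes get padded out to the next stripe or
 * extent-size-hint boundary without overlapping existing mappings.
 */
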
int
xfs_iomap_write_direct(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		count_fsb,
	unsigned int		flags,
	struct xfs_bmbt_irec	*imap)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	xfs_filblks_t		resaligned;
	int			nimaps;
	unsigned int		dblocks, rblocks;
	bool			force = false;
	int			error;
	int			bmapi_flags = XFS_BMAPI_PREALLOC;
	int			nr_exts = XFS_IEXT_ADD_NOSPLIT_CNT;

	ASSERT(count_fsb > 0);

	resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
					   xfs_get_extsz_hint(ip));
	if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
		dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		rblocks = resaligned;
	} else {
		dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		rblocks = 0;
	}

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then crash
	 * we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation. Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten extent
	 * callback for DAX. This also means that we need to be able to dip into
	 * the reserve block pool for bmbt block allocation if there is no space
	 * left but we need to do unwritten extent conversion.
	 */
	if (flags & IOMAP_DAX) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (imap->br_state == XFS_EXT_UNWRITTEN) {
			force = true;
			nr_exts = XFS_IEXT_WRITE_UNWRITTEN_CNT;
			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks,
			rblocks, force, &tp);
	if (error)
		return error;

	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK, nr_exts);
	if (error)
		goto out_trans_cancel;

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flags, 0,
				imap, &nimaps);
	if (error)
		goto out_trans_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

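/*
 * Reservation note for xfs_iomap_write_direct() above (illustrative):
 * XFS_DIOSTRAT_SPACE_RES(mp, N) reserves space for N data blocks plus a
 * worst-case bmap btree split.  The "<< 1" in the DAX unwritten-
 * conversion case doubles the split reservation because converting the
 * middle of an unwritten extent can insert two new extents, each able to
 * force a full split (see xfs_iomap_write_unwritten() below).
 */
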
STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_fsblock_t		alloc_blocks)
{
	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_blk.reserved + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_fsblock_t		*qblocks,
	int			*qshift,
	int64_t			*qfreesp)
{
	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);
	int64_t			freesp;
	int			shift = 0;

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_blk.reserved >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_blk.reserved;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

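/*
 * Illustrative example of the throttle shifts above: with a dquot's
 * remaining headroom (freesp) below the 5% low-space threshold but above
 * 3%, shift == 2 and the prealloc is quartered; below the 1% threshold
 * the shift reaches 6, dividing the prealloc by 64.
 */
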
/*
 * If we don't have a user-specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full.  The closer the
 * filesystem is to being full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset,
	loff_t			count,
	struct xfs_iext_cursor	*icur)
{
	struct xfs_iext_cursor	ncur = *icur;
	struct xfs_bmbt_irec	prev, got;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	xfs_fsblock_t		alloc_blocks = 0;
	xfs_extlen_t		plen;
	int			shift = 0;
	int			qshift = 0;

	/*
	 * As an exception we don't do any preallocation at all if the file is
	 * smaller than the minimum preallocation and we are using the default
	 * dynamic preallocation scheme, as it is likely this is the only write
	 * to the file that is going to be done.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks))
		return 0;

	/*
	 * Use the minimum preallocation size for small files or if we are
	 * writing right after a hole.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_prev_extent(ifp, &ncur, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_allocsize_blocks;

	/*
	 * Take the size of the preceding data extents as the basis for the
	 * preallocation size. Note that we don't care if the previous extents
	 * are written or not.
	 */
	plen = prev.br_blockcount;
	while (xfs_iext_prev_extent(ifp, &ncur, &got)) {
		if (plen > MAXEXTLEN / 2 ||
		    isnullstartblock(got.br_startblock) ||
		    got.br_startoff + got.br_blockcount != prev.br_startoff ||
		    got.br_startblock + got.br_blockcount != prev.br_startblock)
			break;
		plen += got.br_blockcount;
		prev = got;
	}

	/*
	 * If the size of the extents is greater than half the maximum extent
	 * length, then use the current offset as the basis.  This ensures that
	 * for large files the preallocation size always extends to MAXEXTLEN
	 * rather than falling short due to things like stripe unit/width
	 * alignment of real extents.
	 */
	alloc_blocks = plen * 2;
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported
	 * prealloc size, we round up first, apply appropriate throttling,
	 * round down and cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = min(alloc_blocks, qblocks);
	shift = max(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
	if (alloc_blocks < mp->m_allocsize_blocks)
		alloc_blocks = mp->m_allocsize_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_allocsize_blocks);
	return alloc_blocks;
}

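/*
 * Illustrative walk-through for xfs_iomap_prealloc_size() above (assumed
 * numbers, not from the original source): if the preceding contiguous
 * extents span 1000 blocks, alloc_blocks starts at 2000; with global
 * free space between 4% and 5% of the filesystem, shift == 2, so the
 * prealloc becomes rounddown_pow_of_two(2000 >> 2) == 256 blocks,
 * clamped to no less than m_allocsize_blocks.
 */
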
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count,
	bool		update_isize)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	struct inode	*inode = VFS_I(ip);
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	/* Attach dquots so that bmbt splits are accounted correctly. */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that we
		 * complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks,
				0, true, &tp);
		if (error)
			return error;

		error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
				XFS_IEXT_WRITE_UNWRITTEN_CNT);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, resblks, &imap,
					&nimaps);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;
		if (update_isize && i_size > i_size_read(inode))
			i_size_write(inode, i_size);
		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_disk_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (unlikely(!xfs_valid_startblock(ip, imap.br_startblock)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

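/*
 * Illustrative walk-through of the conversion loop above: if the caller
 * asks to convert 1MB and xfs_bmapi_write() returns a 512k mapping on
 * the first pass, the loop commits that transaction, advances offset_fsb
 * by the returned br_blockcount, shrinks count_fsb accordingly, and
 * starts a second transaction for the remaining 512k.
 */
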
static inline bool
imap_needs_alloc(
	struct inode		*inode,
	unsigned		flags,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	/* don't allocate blocks when just zeroing */
	if (flags & IOMAP_ZERO)
		return false;
	if (!nimaps ||
	    imap->br_startblock == HOLESTARTBLOCK ||
	    imap->br_startblock == DELAYSTARTBLOCK)
		return true;
	/* we convert unwritten extents before copying the data for DAX */
	if ((flags & IOMAP_DAX) && imap->br_state == XFS_EXT_UNWRITTEN)
		return true;
	return false;
}

static inline bool
imap_needs_cow(
	struct xfs_inode	*ip,
	unsigned int		flags,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	if (!xfs_is_cow_inode(ip))
		return false;

	/* when zeroing we don't have to COW holes or unwritten extents */
	if (flags & IOMAP_ZERO) {
		if (!nimaps ||
		    imap->br_startblock == HOLESTARTBLOCK ||
		    imap->br_state == XFS_EXT_UNWRITTEN)
			return false;
	}

	return true;
}

static int
xfs_ilock_for_iomap(
	struct xfs_inode	*ip,
	unsigned		flags,
	unsigned		*lockmode)
{
	unsigned		mode = XFS_ILOCK_SHARED;
	bool			is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);

	/*
	 * COW writes may allocate delalloc space or convert unwritten COW
	 * extents, so we need to make sure to take the lock exclusively here.
	 */
	if (xfs_is_cow_inode(ip) && is_write)
		mode = XFS_ILOCK_EXCL;

	/*
	 * Extents not yet cached require exclusive access, don't block.  This
	 * is an opencoded xfs_ilock_data_map_shared() call but with
	 * non-blocking behaviour.
	 */
	if (xfs_need_iread_extents(&ip->i_df)) {
		if (flags & IOMAP_NOWAIT)
			return -EAGAIN;
		mode = XFS_ILOCK_EXCL;
	}

relock:
	if (flags & IOMAP_NOWAIT) {
		if (!xfs_ilock_nowait(ip, mode))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, mode);
	}

	/*
	 * The reflink iflag could have changed since the earlier unlocked
	 * check, so if we got ILOCK_SHARED for a write but we're now a
	 * reflink inode we have to switch to ILOCK_EXCL and relock.
	 */
	if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_cow_inode(ip)) {
		xfs_iunlock(ip, mode);
		mode = XFS_ILOCK_EXCL;
		goto relock;
	}

	*lockmode = mode;
	return 0;
}

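/*
 * Illustrative scenario for xfs_ilock_for_iomap() above: a plain
 * overwrite of a non-reflink inode with cached extents takes
 * ILOCK_SHARED; if another thread makes the inode a reflink inode
 * between the unlocked check and the lock, the relock path upgrades to
 * ILOCK_EXCL before proceeding.
 */
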
/*
 * Check that the imap we are going to return to the caller spans the entire
 * range that the caller requested for the IO.
 */
static bool
imap_spans_range(
	struct xfs_bmbt_irec	*imap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	if (imap->br_startoff > offset_fsb)
		return false;
	if (imap->br_startoff + imap->br_blockcount < end_fsb)
		return false;
	return true;
}

static int
xfs_direct_write_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap, cmap;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, length);
	int			nimaps = 1, error = 0;
	bool			shared = false;
	u16			iomap_flags = 0;
	unsigned		lockmode;

	ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO));

	if (xfs_is_shutdown(mp))
		return -EIO;

	/*
	 * Writes that span EOF might trigger an IO size update on completion,
	 * so consider them to be dirty for the purposes of O_DSYNC even if
	 * there are no other metadata changes pending or having been made
	 * here.
	 */
	if (offset + length > i_size_read(inode))
		iomap_flags |= IOMAP_F_DIRTY;

	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (error)
		goto out_unlock;

	if (imap_needs_cow(ip, flags, &imap, nimaps)) {
		error = -EAGAIN;
		if (flags & IOMAP_NOWAIT)
			goto out_unlock;

		/* may drop and re-acquire the ilock */
		error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
				&lockmode, flags & IOMAP_DIRECT);
		if (error)
			goto out_unlock;
		if (shared)
			goto out_found_cow;
		end_fsb = imap.br_startoff + imap.br_blockcount;
		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
	}

	if (imap_needs_alloc(inode, flags, &imap, nimaps))
		goto allocate_blocks;

	/*
	 * NOWAIT and OVERWRITE I/O needs to span the entire requested I/O with
	 * a single map so that we avoid partial IO failures due to the rest of
	 * the I/O range not covered by this map triggering an EAGAIN condition
	 * when it is subsequently mapped and aborting the I/O.
	 */
	if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY)) {
		error = -EAGAIN;
		if (!imap_spans_range(&imap, offset_fsb, end_fsb))
			goto out_unlock;
	}

	/*
	 * For overwrite only I/O, we cannot convert unwritten extents without
	 * requiring sub-block zeroing.  This can only be done under an
	 * exclusive IOLOCK, hence return -EAGAIN if this is not a written
	 * extent to tell the caller to try again.
	 */
	if (flags & IOMAP_OVERWRITE_ONLY) {
		error = -EAGAIN;
		if (imap.br_state != XFS_EXT_NORM &&
		    ((offset | length) & mp->m_blockmask))
			goto out_unlock;
	}

	xfs_iunlock(ip, lockmode);
	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags);

allocate_blocks:
	error = -EAGAIN;
	if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY))
		goto out_unlock;

	/*
	 * We cap the maximum length we map to a sane size to keep the chunks
	 * of work done somewhat symmetric with the work writeback does.  This
	 * is a completely arbitrary number pulled out of thin air as a best
	 * guess for initial testing.
	 *
	 * Note that the value needs to be less than 32-bits wide until the
	 * lower level functions are updated.
	 */
	length = min_t(loff_t, length, 1024 * PAGE_SIZE);
	end_fsb = xfs_iomap_end_fsb(mp, offset, length);

	if (offset + length > XFS_ISIZE(ip))
		end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
	else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
		end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount);
	xfs_iunlock(ip, lockmode);

	error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
			flags, &imap);
	if (error)
		return error;

	trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
				 iomap_flags | IOMAP_F_NEW);

out_found_cow:
	xfs_iunlock(ip, lockmode);
	length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
	trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
	if (imap.br_startblock != HOLESTARTBLOCK) {
		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0);
		if (error)
			return error;
	}
	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED);

out_unlock:
	if (lockmode)
		xfs_iunlock(ip, lockmode);
	return error;
}

const struct iomap_ops xfs_direct_write_iomap_ops = {
	.iomap_begin		= xfs_direct_write_iomap_begin,
};
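
/*
 * Usage sketch (illustrative, not from this file): direct I/O paths hand
 * this ops table to the generic iomap layer, roughly:
 *
 *	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
 *			   &xfs_dio_write_ops, 0, NULL, 0);
 *
 * The exact iomap_dio_rw() argument list varies across kernel versions,
 * so treat the call above as a sketch of the wiring rather than the real
 * caller in xfs_file.c.
 */
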
Christoph Hellwig | 51446f5 | 2016-09-19 11:10:21 +1000 | [diff] [blame] | 865 | static int |
Christoph Hellwig | f150b42 | 2019-10-19 09:09:46 -0700 | [diff] [blame] | 866 | xfs_buffered_write_iomap_begin( |
Christoph Hellwig | 51446f5 | 2016-09-19 11:10:21 +1000 | [diff] [blame] | 867 | struct inode *inode, |
| 868 | loff_t offset, |
| 869 | loff_t count, |
Christoph Hellwig | 0365c5d | 2018-10-18 17:19:26 +1100 | [diff] [blame] | 870 | unsigned flags, |
Christoph Hellwig | 36adcba | 2019-10-19 09:09:44 -0700 | [diff] [blame] | 871 | struct iomap *iomap, |
| 872 | struct iomap *srcmap) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 873 | { |
Christoph Hellwig | 51446f5 | 2016-09-19 11:10:21 +1000 | [diff] [blame] | 874 | struct xfs_inode *ip = XFS_I(inode); |
| 875 | struct xfs_mount *mp = ip->i_mount; |
Christoph Hellwig | 51446f5 | 2016-09-19 11:10:21 +1000 | [diff] [blame] | 876 | xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); |
Christoph Hellwig | 4356822 | 2019-10-19 09:09:44 -0700 | [diff] [blame] | 877 | xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, count); |
Christoph Hellwig | db46e60 | 2019-02-18 09:38:47 -0800 | [diff] [blame] | 878 | struct xfs_bmbt_irec imap, cmap; |
| 879 | struct xfs_iext_cursor icur, ccur; |
Brian Foster | f782088 | 2016-11-28 14:57:42 +1100 | [diff] [blame] | 880 | xfs_fsblock_t prealloc_blocks = 0; |
Christoph Hellwig | c4feb0b | 2019-02-18 09:38:48 -0800 | [diff] [blame] | 881 | bool eof = false, cow_eof = false, shared = false; |
Christoph Hellwig | 12dfb58 | 2019-10-19 09:09:47 -0700 | [diff] [blame] | 882 | int allocfork = XFS_DATA_FORK; |
Christoph Hellwig | db46e60 | 2019-02-18 09:38:47 -0800 | [diff] [blame] | 883 | int error = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 884 | |
Dave Chinner | 75c8c50f | 2021-08-18 18:46:53 -0700 | [diff] [blame] | 885 | if (xfs_is_shutdown(mp)) |
Brian Foster | e482669 | 2021-02-10 17:27:20 -0800 | [diff] [blame] | 886 | return -EIO; |
| 887 | |
Christoph Hellwig | f150b42 | 2019-10-19 09:09:46 -0700 | [diff] [blame] | 888 | /* we can't use delayed allocations when using extent size hints */ |
| 889 | if (xfs_get_extsz_hint(ip)) |
| 890 | return xfs_direct_write_iomap_begin(inode, offset, count, |
| 891 | flags, iomap, srcmap); |
| 892 | |
	ASSERT(!XFS_IS_REALTIME_INODE(ip));

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
	if (error)
		goto out_unlock;

	/*
	 * Search the data fork first to look up our source mapping.  We
	 * always need the data fork map, as we have to return it to the
	 * iomap code so that the higher level write code can read data in to
	 * perform read-modify-write cycles for unaligned writes.
	 */
	eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
	if (eof)
		imap.br_startoff = end_fsb;	/* fake hole until the end */

	/* We never need to allocate blocks for zeroing a hole. */
	if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) {
		xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
		goto out_unlock;
	}

	/*
	 * Search the COW fork extent list even if we did not find a data fork
	 * extent.  This serves two purposes: first, it implements speculative
	 * preallocation using cowextsize, so that we also unshare blocks
	 * adjacent to shared blocks instead of just the shared blocks
	 * themselves.  Second, the lookup in the extent list is generally
	 * faster than going out to the shared extent tree.
	 */
	if (xfs_is_cow_inode(ip)) {
		if (!ip->i_cowfp) {
			ASSERT(!xfs_is_reflink_inode(ip));
			xfs_ifork_init_cow(ip);
		}
		cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
				&ccur, &cmap);
		if (!cow_eof && cmap.br_startoff <= offset_fsb) {
			trace_xfs_reflink_cow_found(ip, &cmap);
			goto found_cow;
		}
	}

	if (imap.br_startoff <= offset_fsb) {
		/*
		 * For reflink files we may need a delalloc reservation when
		 * overwriting shared extents.  This includes zeroing of
		 * existing extents that contain data.
		 */
		if (!xfs_is_cow_inode(ip) ||
		    ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto found_imap;
		}

		xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);

		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_bmap_trim_cow(ip, &imap, &shared);
		if (error)
			goto out_unlock;

		/* Not shared?  Just report the (potentially capped) extent. */
		if (!shared) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto found_imap;
		}

		/*
		 * Fork all the shared blocks from our write offset until the
		 * end of the extent.
		 */
		allocfork = XFS_COW_FORK;
		end_fsb = imap.br_startoff + imap.br_blockcount;
	} else {
		/*
		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
		 * pages to keep the chunks of work done somewhat symmetric
		 * with the work writeback does.  This is a completely
		 * arbitrary number pulled out of thin air.
		 *
		 * Note that the value needs to be less than 32 bits wide until
		 * the lower level functions are updated.
		 */
		count = min_t(loff_t, count, 1024 * PAGE_SIZE);
		end_fsb = xfs_iomap_end_fsb(mp, offset, count);

		if (xfs_is_always_cow_inode(ip))
			allocfork = XFS_COW_FORK;
	}
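
/*
 * Editor's summary of the fork selection above (a sketch, not
 * authoritative):
 *
 *	data extent, not shared		-> report it as is (found_imap)
 *	data extent, shared		-> delalloc in the COW fork across
 *					   the to-be-unshared range
 *	hole, normal inode		-> delalloc in the data fork
 *	hole, always_cow inode		-> delalloc in the COW fork
 *
 * Whenever the COW fork is chosen, the existing data fork mapping is
 * still handed back via srcmap below so callers can read old contents
 * for read-modify-write of unaligned ranges.
 */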

	error = xfs_qm_dqattach_locked(ip, false);
	if (error)
		goto out_unlock;

	if (eof && offset + count > XFS_ISIZE(ip)) {
		/*
		 * Determine the initial size of the preallocation.
		 * We clean up any extra preallocation when the file is closed.
		 */
		if (xfs_has_allocsize(mp))
			prealloc_blocks = mp->m_allocsize_blocks;
		else
			prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
						offset, count, &icur);
		if (prealloc_blocks) {
			xfs_extlen_t	align;
			xfs_off_t	end_offset;
			xfs_fileoff_t	p_end_fsb;

			end_offset = XFS_ALLOC_ALIGN(mp, offset + count - 1);
			p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
					prealloc_blocks;

			align = xfs_eof_alignment(ip);
			if (align)
				p_end_fsb = roundup_64(p_end_fsb, align);

			p_end_fsb = min(p_end_fsb,
				XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
			ASSERT(p_end_fsb > offset_fsb);
			prealloc_blocks = p_end_fsb - end_fsb;
		}
	}
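
/*
 * Editor's worked example (assumed numbers, ignoring xfs_eof_alignment()):
 * with 4k blocks and allocsize=64k, prealloc_blocks starts at 16.  A write
 * ending at 100k has end_offset aligned down to 64k, so p_end_fsb becomes
 * 16 + 16 = 32 blocks (128k), while end_fsb is 25 blocks (100k rounded
 * up).  The speculative preallocation is therefore 32 - 25 = 7 blocks
 * beyond what the write itself needs.
 */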
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1029 | |
Dave Chinner | 8de2bf9 | 2009-04-06 18:49:12 +0200 | [diff] [blame] | 1030 | retry: |
Christoph Hellwig | 12dfb58 | 2019-10-19 09:09:47 -0700 | [diff] [blame] | 1031 | error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb, |
Christoph Hellwig | db46e60 | 2019-02-18 09:38:47 -0800 | [diff] [blame] | 1032 | end_fsb - offset_fsb, prealloc_blocks, |
Christoph Hellwig | 12dfb58 | 2019-10-19 09:09:47 -0700 | [diff] [blame] | 1033 | allocfork == XFS_DATA_FORK ? &imap : &cmap, |
| 1034 | allocfork == XFS_DATA_FORK ? &icur : &ccur, |
| 1035 | allocfork == XFS_DATA_FORK ? eof : cow_eof); |
Dave Chinner | 055388a | 2011-01-04 11:35:03 +1100 | [diff] [blame] | 1036 | switch (error) { |
| 1037 | case 0: |
Christoph Hellwig | 51446f5 | 2016-09-19 11:10:21 +1000 | [diff] [blame] | 1038 | break; |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1039 | case -ENOSPC: |
| 1040 | case -EDQUOT: |
Christoph Hellwig | 51446f5 | 2016-09-19 11:10:21 +1000 | [diff] [blame] | 1041 | /* retry without any preallocation */ |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 1042 | trace_xfs_delalloc_enospc(ip, offset, count); |
Brian Foster | f782088 | 2016-11-28 14:57:42 +1100 | [diff] [blame] | 1043 | if (prealloc_blocks) { |
| 1044 | prealloc_blocks = 0; |
Dave Chinner | 9aa0500 | 2012-10-08 21:56:04 +1100 | [diff] [blame] | 1045 | goto retry; |
Dave Chinner | 055388a | 2011-01-04 11:35:03 +1100 | [diff] [blame] | 1046 | } |
Gustavo A. R. Silva | 53004ee | 2021-04-20 17:54:36 -0500 | [diff] [blame] | 1047 | fallthrough; |
Christoph Hellwig | 51446f5 | 2016-09-19 11:10:21 +1000 | [diff] [blame] | 1048 | default: |
| 1049 | goto out_unlock; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1050 | } |
| 1051 | |
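/*
 * Editor's note: the switch above degrades gracefully under space or
 * quota pressure: first try with speculative preallocation, then retry
 * with exactly the blocks the write needs, and only then return
 * -ENOSPC/-EDQUOT to the caller.
 */
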
	if (allocfork == XFS_COW_FORK) {
		trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
		goto found_cow;
	}

	/*
	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
	 * them out if the write happens to fail.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_NEW);

found_imap:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);

found_cow:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (imap.br_startoff <= offset_fsb) {
		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0);
		if (error)
			return error;
		return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
					 IOMAP_F_SHARED);
	}

	xfs_trim_extent(&cmap, offset_fsb, imap.br_startoff - offset_fsb);
	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, 0);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

static int
xfs_buffered_write_iomap_end(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	if (iomap->type != IOMAP_DELALLOC)
		return 0;

	/*
	 * Behave as if the write failed if drop writes is enabled.  Set the
	 * NEW flag to force delalloc cleanup.
	 */
	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DROP_WRITES)) {
		iomap->flags |= IOMAP_F_NEW;
		written = 0;
	}

	/*
	 * start_fsb refers to the first unused block after a short write.  If
	 * nothing was written, round offset down to point at the first block
	 * in the range.
	 */
	if (unlikely(!written))
		start_fsb = XFS_B_TO_FSBT(mp, offset);
	else
		start_fsb = XFS_B_TO_FSB(mp, offset + written);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	/*
	 * Trim delalloc blocks if they were allocated by this write and we
	 * didn't manage to write the whole range.
	 *
	 * We don't need to care about racing delalloc as we hold i_rwsem
	 * across the reserve/allocate/unreserve calls.  If there are delalloc
	 * blocks in the range, they are ours.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
		truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
					 XFS_FSB_TO_B(mp, end_fsb) - 1);

		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
				end_fsb - start_fsb);
		if (error && !xfs_is_shutdown(mp)) {
			xfs_alert(mp, "%s: unable to clean up ino %lld",
				__func__, ip->i_ino);
			return error;
		}
	}

	return 0;
}

const struct iomap_ops xfs_buffered_write_iomap_ops = {
	.iomap_begin		= xfs_buffered_write_iomap_begin,
	.iomap_end		= xfs_buffered_write_iomap_end,
};
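
/*
 * Editor's sketch of how these ops are consumed (illustrative only; the
 * real caller is the buffered write path in fs/xfs/xfs_file.c, which
 * takes i_rwsem and handles EOF zeroing before getting here):
 */
#if 0
static ssize_t
example_buffered_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	/*
	 * The iomap layer calls ->iomap_begin() for each mapping, copies
	 * user data into the page cache, then calls ->iomap_end() so a
	 * short write can punch out its unused delalloc blocks.
	 */
	return iomap_file_buffered_write(iocb, from,
			&xfs_buffered_write_iomap_ops);
}
#endif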

static int
xfs_read_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, length);
	int			nimaps = 1, error = 0;
	bool			shared = false;
	unsigned		lockmode;

	ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO)));

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			&nimaps, 0);
	if (!error && (flags & IOMAP_REPORT))
		error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
	xfs_iunlock(ip, lockmode);

	if (error)
		return error;
	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
				 shared ? IOMAP_F_SHARED : 0);
}

const struct iomap_ops xfs_read_iomap_ops = {
	.iomap_begin		= xfs_read_iomap_begin,
};
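
/*
 * Editor's note: there is no ->iomap_end() here because reads allocate
 * nothing and so have nothing to undo.  Readahead consumes these ops
 * roughly as below (illustrative; see the address_space ops in
 * fs/xfs/xfs_aops.c):
 */
#if 0
static void
example_readahead(
	struct readahead_control	*rac)
{
	/* map each chunk of the readahead window through the data fork */
	iomap_readahead(rac, &xfs_read_iomap_ops);
}
#endif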

static int
xfs_seek_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	xfs_fileoff_t		cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	imap, cmap;
	int			error = 0;
	unsigned		lockmode;

	if (xfs_is_shutdown(mp))
		return -EIO;

	lockmode = xfs_ilock_data_map_shared(ip);
	error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
	if (error)
		goto out_unlock;

	if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) {
		/*
		 * If we found a data extent we are done.
		 */
		if (imap.br_startoff <= offset_fsb)
			goto done;
		data_fsb = imap.br_startoff;
	} else {
		/*
		 * Fake a hole until the end of the file.
		 */
		data_fsb = xfs_iomap_end_fsb(mp, offset, length);
	}

	/*
	 * If a COW fork extent covers the hole, report it, capped to the next
	 * data fork extent.
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
		cow_fsb = cmap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		if (data_fsb < cow_fsb + cmap.br_blockcount)
			end_fsb = min(end_fsb, data_fsb);
		xfs_trim_extent(&cmap, offset_fsb, end_fsb);
		error = xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
					  IOMAP_F_SHARED);
		/*
		 * This is a COW extent, so we must probe the page cache
		 * because there could be dirty page cache backed by this
		 * extent.
		 */
		iomap->type = IOMAP_UNWRITTEN;
		goto out_unlock;
	}

	/*
	 * Else report a hole, capped to the next found data or COW extent.
	 */
	if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb)
		imap.br_blockcount = cow_fsb - offset_fsb;
	else
		imap.br_blockcount = data_fsb - offset_fsb;
	imap.br_startoff = offset_fsb;
	imap.br_startblock = HOLESTARTBLOCK;
	imap.br_state = XFS_EXT_NORM;
done:
	xfs_trim_extent(&imap, offset_fsb, end_fsb);
	error = xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);
out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

const struct iomap_ops xfs_seek_iomap_ops = {
	.iomap_begin		= xfs_seek_iomap_begin,
};
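
/*
 * Editor's sketch: SEEK_HOLE/SEEK_DATA are layered on these ops
 * (illustrative; the real dispatch lives in xfs_file_llseek()):
 */
#if 0
	loff_t	found;

	found = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
	/* or, symmetrically: */
	found = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
#endif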

static int
xfs_xattr_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1, error = 0;
	unsigned		lockmode;

	if (xfs_is_shutdown(mp))
		return -EIO;

	lockmode = xfs_ilock_attr_map_shared(ip);

	/* if there is no attribute fork or no extents, return ENOENT */
	if (!XFS_IFORK_Q(ip) || !ip->i_afp->if_nextents) {
		error = -ENOENT;
		goto out_unlock;
	}

	ASSERT(ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			&nimaps, XFS_BMAPI_ATTRFORK);
out_unlock:
	xfs_iunlock(ip, lockmode);

	if (error)
		return error;
	ASSERT(nimaps);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);
}

const struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};
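
/*
 * Editor's note: these ops exist only so FIEMAP can walk the attribute
 * fork; no read or write I/O goes through them.  Roughly (illustrative;
 * see xfs_vn_fiemap()):
 */
#if 0
	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
		error = iomap_fiemap(inode, fieinfo, start, length,
				&xfs_xattr_iomap_ops);
#endif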

int
xfs_zero_range(
	struct xfs_inode	*ip,
	loff_t			pos,
	loff_t			len,
	bool			*did_zero)
{
	struct inode		*inode = VFS_I(ip);

	if (IS_DAX(inode))
		return dax_zero_range(inode, pos, len, did_zero,
				      &xfs_direct_write_iomap_ops);
	return iomap_zero_range(inode, pos, len, did_zero,
				&xfs_buffered_write_iomap_ops);
}

int
xfs_truncate_page(
	struct xfs_inode	*ip,
	loff_t			pos,
	bool			*did_zero)
{
	struct inode		*inode = VFS_I(ip);

	if (IS_DAX(inode))
		return dax_truncate_page(inode, pos, did_zero,
					 &xfs_direct_write_iomap_ops);
	return iomap_truncate_page(inode, pos, did_zero,
				   &xfs_buffered_write_iomap_ops);
}
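
/*
 * Editor's note: both helpers above pick the DAX or page-cache
 * implementation at runtime, so callers need a single entry point.  A
 * sketch of a shrinking truncate zeroing the new tail block (assumed
 * caller, e.g. the setattr path):
 */
#if 0
	bool	did_zero = false;

	/* zero from newsize to the end of its block so no stale data leaks */
	error = xfs_truncate_page(ip, newsize, &did_zero);
#endif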