// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"


#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)

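/*
 * Report a mapping that unexpectedly points at block zero of the data
 * device; such a mapping indicates on-disk corruption, so hand back
 * -EFSCORRUPTED for the caller to propagate.
 */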
static int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

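/*
 * Translate an xfs_bmbt_irec block mapping into the generic struct iomap
 * consumed by the iomap infrastructure, setting IOMAP_F_DIRTY when the
 * inode is pinned and has fsync-relevant fields logged beyond timestamps.
 */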
int
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap,
	bool			shared)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (unlikely(!imap->br_startblock && !XFS_IS_REALTIME_INODE(ip)))
		return xfs_alert_fsblock_zero(ip, imap);

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK ||
		   isnullstartblock(imap->br_startblock)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
	iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));

	if (xfs_ipincount(ip) &&
	    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		iomap->flags |= IOMAP_F_DIRTY;
	if (shared)
		iomap->flags |= IOMAP_F_SHARED;
	return 0;
}

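/*
 * Fill out an iomap describing a hole covering the given file block range.
 */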
static void
xfs_hole_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	iomap->addr = IOMAP_NULL_ADDR;
	iomap->type = IOMAP_HOLE;
	iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
	iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
	iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
}

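/*
 * Work out the allocation alignment, in filesystem blocks, to use for an
 * allocation that extends the file: stripe geometry for data-device inodes,
 * further rounded up to any extent size hint passed in by the caller.
 */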
xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when the file is on a real-time subvolume or has a di_extsize
	 * hint).
	 */
	if (extsize) {
		if (align)
			align = roundup_64(align, extsize);
		else
			align = extsize;
	}

	return align;
}

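/*
 * Round *last_fsb up to the EOF alignment, but only move it if the rounded
 * offset still sits at or beyond the last allocated block of the file.
 */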
STATIC int
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize,
	xfs_fileoff_t		*last_fsb)
{
	xfs_extlen_t		align = xfs_eof_alignment(ip, extsize);

	if (align) {
		xfs_fileoff_t	new_last_fsb = roundup_64(*last_fsb, align);
		int		eof, error;

		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}

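/*
 * Allocate real blocks for a direct I/O write.  Called with the ilock held
 * shared; drops and retakes it exclusively around the allocation
 * transaction, and returns the resulting mapping in *imap.
 */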
int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_extlen_t	extsz;
	int		nimaps;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	uint		qblocks, resblks, resrtextents;
	int		error;
	int		lockmode;
	int		bmapi_flags = XFS_BMAPI_PREALLOC;
	uint		tflags = 0;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);
	lockmode = XFS_ILOCK_SHARED;	/* locked by caller */

	ASSERT(xfs_isilocked(ip, lockmode));

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		/*
		 * Assert that the in-core extent list is present since this can
		 * call xfs_iread_extents() and we only have the ilock shared.
		 * This should be safe because the lock was held around a bmapi
		 * call in the caller and we only need it to access the in-core
		 * list.
		 */
		ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
								XFS_IFEXTENTS);
		error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
		if (error)
			goto out_unlock;
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = min(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);
	resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, extsz);

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Drop the shared lock acquired by the caller, attach the dquot if
	 * necessary and move on to transaction setup.
	 */
	xfs_iunlock(ip, lockmode);
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then crash
	 * we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation. Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten extent
	 * callback for DAX. This also means that we need to be able to dip into
	 * the reserve block pool for bmbt block allocation if there is no space
	 * left but we need to do unwritten extent conversion.
	 */
	if (IS_DAX(VFS_I(ip))) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (imap->br_state == XFS_EXT_UNWRITTEN) {
			tflags |= XFS_TRANS_RESERVE;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
			tflags, &tp);
	if (error)
		return error;

	lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockmode);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				bmapi_flags, resblks, imap, &nimaps);
	if (error)
		goto out_res_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;

out_res_cancel:
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

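/*
 * Decide whether speculative preallocation should be throttled against the
 * given quota type: only if the quota is enforced, a high watermark is
 * configured, and the projected usage reaches the low watermark.
 */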
STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

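/*
 * Scale the preallocation back according to how close this quota is to its
 * high watermark: squash it entirely once over the watermark, otherwise
 * keep whichever of the existing and per-quota throttles is more aggressive.
 */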
STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift,
	int64_t	*qfreesp)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

/*
 * If we are doing a write at the end of the file and there are no allocations
 * past this one, then extend the allocation out to the file system's write
 * iosize.
 *
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full.  The closer the
 * filesystem is to being full, the smaller the maximum preallocation.
 *
 * As an exception we don't do any preallocation at all if the file is smaller
 * than the minimum preallocation and we are using the default dynamic
 * preallocation scheme, as it is likely this is the only write to the file
 * that is going to be done.
 *
 * We clean up any extra space left over when the file is closed in
 * xfs_inactive().
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset,
	loff_t			count,
	struct xfs_iext_cursor	*icur)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	struct xfs_bmbt_irec	prev;
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;
	xfs_fsblock_t		alloc_blocks = 0;

	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)))
		return 0;

	/*
	 * If an explicit allocsize is set, the file is small, or we
	 * are writing behind a hole, then use the minimum prealloc:
	 */
	if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_peek_prev_extent(ifp, icur, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_writeio_blocks;

	/*
	 * Determine the initial size of the preallocation. We are beyond the
	 * current EOF here, but we need to take into account whether this is
	 * a sparse write or an extending write when determining the
	 * preallocation size.  Hence we need to look up the extent that ends
	 * at the current write offset and use the result to determine the
	 * preallocation size.
	 *
	 * If the extent is a hole, then preallocation is essentially disabled.
	 * Otherwise we take the size of the preceding data extent as the basis
	 * for the preallocation size. If the size of the extent is greater than
	 * half the maximum extent length, then use the current offset as the
	 * basis. This ensures that for large files the preallocation size
	 * always extends to MAXEXTLEN rather than falling short due to things
	 * like stripe unit/width alignment of real extents.
	 */
	if (prev.br_blockcount <= (MAXEXTLEN >> 1))
		alloc_blocks = prev.br_blockcount << 1;
	else
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported
	 * prealloc size, we round up first, apply appropriate throttling,
	 * round down and cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = min(alloc_blocks, qblocks);
	shift = max(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);
	return alloc_blocks;
}

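/*
 * Set up a delayed allocation (and, for reflink or always_cow inodes, a COW
 * fork reservation) for a buffered write, including any speculative EOF
 * preallocation, and report the resulting mapping in *iomap.
 */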
static int
xfs_file_iomap_begin_delay(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		maxbytes_fsb =
		XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	xfs_fileoff_t		end_fsb;
	struct xfs_bmbt_irec	imap, cmap;
	struct xfs_iext_cursor	icur, ccur;
	xfs_fsblock_t		prealloc_blocks = 0;
	bool			eof = false, cow_eof = false, shared = false;
	int			whichfork = XFS_DATA_FORK;
	int			error = 0;

	ASSERT(!XFS_IS_REALTIME_INODE(ip));
	ASSERT(!xfs_get_extsz_hint(ip));

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);

	/*
	 * Search the data fork first to look up our source mapping.  We
	 * always need the data fork map, as we have to return it to the
	 * iomap code so that the higher level write code can read data in to
	 * perform read-modify-write cycles for unaligned writes.
	 */
	eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
	if (eof)
		imap.br_startoff = end_fsb; /* fake hole until the end */

	/* We never need to allocate blocks for zeroing a hole. */
	if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) {
		xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
		goto out_unlock;
	}

	/*
	 * Search the COW fork extent list even if we did not find a data fork
	 * extent.  This serves two purposes: first this implements the
	 * speculative preallocation using cowextsize, so that we also unshare
	 * blocks adjacent to shared blocks instead of just the shared blocks
	 * themselves.  Second the lookup in the extent list is generally faster
	 * than going out to the shared extent tree.
	 */
	if (xfs_is_cow_inode(ip)) {
		if (!ip->i_cowfp) {
			ASSERT(!xfs_is_reflink_inode(ip));
			xfs_ifork_init_cow(ip);
		}
		cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
				&ccur, &cmap);
		if (!cow_eof && cmap.br_startoff <= offset_fsb) {
			trace_xfs_reflink_cow_found(ip, &cmap);
			whichfork = XFS_COW_FORK;
			goto done;
		}
	}

	if (imap.br_startoff <= offset_fsb) {
		/*
		 * For reflink files we may need a delalloc reservation when
		 * overwriting shared extents.  This includes zeroing of
		 * existing extents that contain data.
		 */
		if (!xfs_is_cow_inode(ip) ||
		    ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto done;
		}

		xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);

		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_inode_need_cow(ip, &imap, &shared);
		if (error)
			goto out_unlock;

		/* Not shared?  Just report the (potentially capped) extent. */
		if (!shared) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto done;
		}

		/*
		 * Fork all the shared blocks from our write offset until the
		 * end of the extent.
		 */
		whichfork = XFS_COW_FORK;
		end_fsb = imap.br_startoff + imap.br_blockcount;
	} else {
		/*
		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
		 * pages to keep the chunks of work done somewhat symmetric
		 * with the work writeback does.  This is a completely
		 * arbitrary number pulled out of thin air.
		 *
		 * Note that the value needs to be less than 32 bits wide until
		 * the lower level functions are updated.
		 */
		count = min_t(loff_t, count, 1024 * PAGE_SIZE);
		end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);

		if (xfs_is_always_cow_inode(ip))
			whichfork = XFS_COW_FORK;
	}

	error = xfs_qm_dqattach_locked(ip, false);
	if (error)
		goto out_unlock;

	if (eof) {
		prealloc_blocks = xfs_iomap_prealloc_size(ip, whichfork, offset,
				count, &icur);
		if (prealloc_blocks) {
			xfs_extlen_t	align;
			xfs_off_t	end_offset;
			xfs_fileoff_t	p_end_fsb;

			end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
			p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
					prealloc_blocks;

			align = xfs_eof_alignment(ip, 0);
			if (align)
				p_end_fsb = roundup_64(p_end_fsb, align);

			p_end_fsb = min(p_end_fsb, maxbytes_fsb);
			ASSERT(p_end_fsb > offset_fsb);
			prealloc_blocks = p_end_fsb - end_fsb;
		}
	}

retry:
	error = xfs_bmapi_reserve_delalloc(ip, whichfork, offset_fsb,
			end_fsb - offset_fsb, prealloc_blocks,
			whichfork == XFS_DATA_FORK ? &imap : &cmap,
			whichfork == XFS_DATA_FORK ? &icur : &ccur,
			whichfork == XFS_DATA_FORK ? eof : cow_eof);
	switch (error) {
	case 0:
		break;
	case -ENOSPC:
	case -EDQUOT:
		/* retry without any preallocation */
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc_blocks) {
			prealloc_blocks = 0;
			goto retry;
		}
		/*FALLTHRU*/
	default:
		goto out_unlock;
	}

	/*
	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
	 * them out if the write happens to fail.
	 */
	iomap->flags |= IOMAP_F_NEW;
	trace_xfs_iomap_alloc(ip, offset, count, whichfork,
			whichfork == XFS_DATA_FORK ? &imap : &cmap);
done:
	if (whichfork == XFS_COW_FORK) {
		if (imap.br_startoff > offset_fsb) {
			xfs_trim_extent(&cmap, offset_fsb,
					imap.br_startoff - offset_fsb);
			error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true);
			goto out_unlock;
		}
		/* ensure we only report blocks we have a reservation for */
		xfs_trim_extent(&imap, cmap.br_startoff, cmap.br_blockcount);
		shared = true;
	}
	error = xfs_bmbt_to_iomap(ip, iomap, &imap, shared);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

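/*
 * Convert unwritten extents in the given range to written, logging updates
 * to the on-disk inode size as the conversion proceeds.  Used after I/O
 * completes into preallocated (unwritten) blocks.
 */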
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count,
	bool		update_isize)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	struct inode	*inode = VFS_I(ip);
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that we
		 * complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
		if (error)
			return error;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, resblks, &imap,
					&nimaps);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;
		if (update_isize && i_size > i_size_read(inode))
			i_size_write(inode, i_size);
		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

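/*
 * Does this mapping require a real allocation before the write can proceed?
 * Holes and delalloc extents always do; so do unwritten extents for DAX,
 * which cannot defer the zeroing to an I/O completion callback.
 */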
static inline bool
imap_needs_alloc(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	return !nimaps ||
		imap->br_startblock == HOLESTARTBLOCK ||
		imap->br_startblock == DELAYSTARTBLOCK ||
		(IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN);
}

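/*
 * Zeroing only has to dirty blocks that already contain data, so a COW
 * reservation is needed unless the range is a hole or unwritten.
 */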
static inline bool
needs_cow_for_zeroing(
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	return nimaps &&
		imap->br_startblock != HOLESTARTBLOCK &&
		imap->br_state != XFS_EXT_UNWRITTEN;
}

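/*
 * Take the ilock in the cheapest mode that is still safe for this mapping
 * request, honouring IOMAP_NOWAIT by returning -EAGAIN instead of blocking
 * or taking the exclusive lock.
 */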
Dave Chinner | dfa03a5 | 2018-05-02 12:54:54 -0700 | [diff] [blame] | 865 | static int |
| 866 | xfs_ilock_for_iomap( |
| 867 | struct xfs_inode *ip, |
| 868 | unsigned flags, |
| 869 | unsigned *lockmode) |
Christoph Hellwig | acdda3a | 2016-11-30 14:37:15 +1100 | [diff] [blame] | 870 | { |
Dave Chinner | dfa03a5 | 2018-05-02 12:54:54 -0700 | [diff] [blame] | 871 | unsigned mode = XFS_ILOCK_SHARED; |
Darrick J. Wong | 5bd88d1 | 2018-06-21 23:26:57 -0700 | [diff] [blame] | 872 | bool is_write = flags & (IOMAP_WRITE | IOMAP_ZERO); |
Dave Chinner | dfa03a5 | 2018-05-02 12:54:54 -0700 | [diff] [blame] | 873 | |
Christoph Hellwig | acdda3a | 2016-11-30 14:37:15 +1100 | [diff] [blame] | 874 | /* |
Christoph Hellwig | af5b5af | 2018-03-01 14:12:12 -0800 | [diff] [blame] | 875 | * COW writes may allocate delalloc space or convert unwritten COW |
| 876 | * extents, so we need to make sure to take the lock exclusively here. |
Christoph Hellwig | acdda3a | 2016-11-30 14:37:15 +1100 | [diff] [blame] | 877 | */ |
Christoph Hellwig | 66ae56a | 2019-02-18 09:38:49 -0800 | [diff] [blame] | 878 | if (xfs_is_cow_inode(ip) && is_write) { |
		/*
		 * FIXME: an overwrite of an already-unshared extent
		 * would not need allocation and could proceed with
		 * the shared lock.
		 */
| 883 | if (flags & IOMAP_NOWAIT) |
| 884 | return -EAGAIN; |
| 885 | mode = XFS_ILOCK_EXCL; |
| 886 | } |
Christoph Hellwig | ff3d8b9 | 2018-03-01 14:12:45 -0800 | [diff] [blame] | 887 | |
	/*
	 * Extents not yet cached require exclusive access; don't
	 * block. This is an open-coded xfs_ilock_data_map_shared()
	 * call, but with non-blocking behaviour.
	 */
Dave Chinner | dfa03a5 | 2018-05-02 12:54:54 -0700 | [diff] [blame] | 893 | if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) { |
| 894 | if (flags & IOMAP_NOWAIT) |
| 895 | return -EAGAIN; |
| 896 | mode = XFS_ILOCK_EXCL; |
| 897 | } |
| 898 | |
Darrick J. Wong | 5bd88d1 | 2018-06-21 23:26:57 -0700 | [diff] [blame] | 899 | relock: |
Dave Chinner | dfa03a5 | 2018-05-02 12:54:54 -0700 | [diff] [blame] | 900 | if (flags & IOMAP_NOWAIT) { |
| 901 | if (!xfs_ilock_nowait(ip, mode)) |
| 902 | return -EAGAIN; |
| 903 | } else { |
| 904 | xfs_ilock(ip, mode); |
| 905 | } |
| 906 | |
	/*
	 * The reflink iflag could have changed since the earlier
	 * unlocked check, so if we got ILOCK_SHARED for a write but
	 * we're now a reflink inode, we have to switch to ILOCK_EXCL
	 * and relock.
	 */
Christoph Hellwig | 66ae56a | 2019-02-18 09:38:49 -0800 | [diff] [blame] | 912 | if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_cow_inode(ip)) { |
Darrick J. Wong | 5bd88d1 | 2018-06-21 23:26:57 -0700 | [diff] [blame] | 913 | xfs_iunlock(ip, mode); |
| 914 | mode = XFS_ILOCK_EXCL; |
| 915 | goto relock; |
| 916 | } |
| 917 | |
Dave Chinner | dfa03a5 | 2018-05-02 12:54:54 -0700 | [diff] [blame] | 918 | *lockmode = mode; |
| 919 | return 0; |
Christoph Hellwig | acdda3a | 2016-11-30 14:37:15 +1100 | [diff] [blame] | 920 | } |
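/*
 * The lock-upgrade dance above is a general idiom: take the cheaper
 * shared lock optimistically, re-check the condition that demands
 * exclusivity once the lock is actually held, and restart with the
 * exclusive lock if the unlocked check turned out to be stale.  A
 * hedged userspace sketch with a pthread rwlock; needs_exclusive()
 * is a hypothetical stand-in for xfs_is_cow_inode().
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t model_lock = PTHREAD_RWLOCK_INITIALIZER;

static bool
needs_exclusive(void)
{
	return false;	/* placeholder; can change between checks */
}

static void
lock_for_op(bool is_write, bool *got_exclusive)
{
	bool excl = is_write && needs_exclusive();

relock:
	if (excl)
		pthread_rwlock_wrlock(&model_lock);
	else
		pthread_rwlock_rdlock(&model_lock);

	/* the condition may have flipped since the unlocked check */
	if (!excl && is_write && needs_exclusive()) {
		pthread_rwlock_unlock(&model_lock);
		excl = true;
		goto relock;
	}
	*got_exclusive = excl;
}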
| 921 | |
Christoph Hellwig | 68a9f5e | 2016-06-21 09:53:44 +1000 | [diff] [blame] | 922 | static int |
| 923 | xfs_file_iomap_begin( |
| 924 | struct inode *inode, |
| 925 | loff_t offset, |
| 926 | loff_t length, |
| 927 | unsigned flags, |
| 928 | struct iomap *iomap) |
| 929 | { |
| 930 | struct xfs_inode *ip = XFS_I(inode); |
| 931 | struct xfs_mount *mp = ip->i_mount; |
| 932 | struct xfs_bmbt_irec imap; |
| 933 | xfs_fileoff_t offset_fsb, end_fsb; |
| 934 | int nimaps = 1, error = 0; |
Christoph Hellwig | d392bc8 | 2018-10-18 17:19:48 +1100 | [diff] [blame] | 935 | bool shared = false; |
Christoph Hellwig | 66642c5 | 2016-09-19 11:26:39 +1000 | [diff] [blame] | 936 | unsigned lockmode; |
Christoph Hellwig | 68a9f5e | 2016-06-21 09:53:44 +1000 | [diff] [blame] | 937 | |
| 938 | if (XFS_FORCED_SHUTDOWN(mp)) |
| 939 | return -EIO; |
| 940 | |
Christoph Hellwig | 0365c5d | 2018-10-18 17:19:26 +1100 | [diff] [blame] | 941 | if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && !(flags & IOMAP_DIRECT) && |
Christoph Hellwig | acdda3a | 2016-11-30 14:37:15 +1100 | [diff] [blame] | 942 | !IS_DAX(inode) && !xfs_get_extsz_hint(ip)) { |
Darrick J. Wong | 2a06705 | 2016-10-03 09:11:33 -0700 | [diff] [blame] | 943 | /* Reserve delalloc blocks for regular writeback. */ |
Christoph Hellwig | 0365c5d | 2018-10-18 17:19:26 +1100 | [diff] [blame] | 944 | return xfs_file_iomap_begin_delay(inode, offset, length, flags, |
| 945 | iomap); |
Christoph Hellwig | 51446f5 | 2016-09-19 11:10:21 +1000 | [diff] [blame] | 946 | } |
| 947 | |
	/*
	 * Lock the inode in the manner required for the specified
	 * operation and check up front for as many of the conditions
	 * that could block as possible. This removes most of the
	 * non-blocking checks from the mapping code below.
	 */
| 954 | error = xfs_ilock_for_iomap(ip, flags, &lockmode); |
| 955 | if (error) |
| 956 | return error; |
Goldwyn Rodrigues | 29a5d29 | 2017-06-20 07:05:48 -0500 | [diff] [blame] | 957 | |
Christoph Hellwig | 68a9f5e | 2016-06-21 09:53:44 +1000 | [diff] [blame] | 958 | ASSERT(offset <= mp->m_super->s_maxbytes); |
Darrick J. Wong | b4d8ad7f | 2017-12-22 13:14:34 -0800 | [diff] [blame] | 959 | if (offset > mp->m_super->s_maxbytes - length) |
Christoph Hellwig | 68a9f5e | 2016-06-21 09:53:44 +1000 | [diff] [blame] | 960 | length = mp->m_super->s_maxbytes - offset; |
| 961 | offset_fsb = XFS_B_TO_FSBT(mp, offset); |
| 962 | end_fsb = XFS_B_TO_FSB(mp, offset + length); |
| 963 | |
| 964 | error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, |
Darrick J. Wong | db1327b | 2016-10-03 09:11:36 -0700 | [diff] [blame] | 965 | &nimaps, 0); |
Christoph Hellwig | 3ba020b | 2016-10-20 15:53:50 +1100 | [diff] [blame] | 966 | if (error) |
| 967 | goto out_unlock; |
Darrick J. Wong | db1327b | 2016-10-03 09:11:36 -0700 | [diff] [blame] | 968 | |
Christoph Hellwig | 3c68d44 | 2017-02-06 10:51:03 -0800 | [diff] [blame] | 969 | if (flags & IOMAP_REPORT) { |
Christoph Hellwig | 5f9268c | 2016-10-20 15:53:32 +1100 | [diff] [blame] | 970 | /* Trim the mapping to the nearest shared extent boundary. */ |
Christoph Hellwig | d392bc8 | 2018-10-18 17:19:48 +1100 | [diff] [blame] | 971 | error = xfs_reflink_trim_around_shared(ip, &imap, &shared); |
Christoph Hellwig | 3ba020b | 2016-10-20 15:53:50 +1100 | [diff] [blame] | 972 | if (error) |
| 973 | goto out_unlock; |
| 974 | } |
| 975 | |
Dave Chinner | d064178 | 2018-05-02 12:54:53 -0700 | [diff] [blame] | 976 | /* Non-modifying mapping requested, so we are done */ |
| 977 | if (!(flags & (IOMAP_WRITE | IOMAP_ZERO))) |
| 978 | goto out_found; |
| 979 | |
Dave Chinner | dfa03a5 | 2018-05-02 12:54:54 -0700 | [diff] [blame] | 980 | /* |
| 981 | * Break shared extents if necessary. Checks for non-blocking IO have |
| 982 | * been done up front, so we don't need to do them here. |
| 983 | */ |
Christoph Hellwig | 66ae56a | 2019-02-18 09:38:49 -0800 | [diff] [blame] | 984 | if (xfs_is_cow_inode(ip)) { |
Darrick J. Wong | 4f29e10 | 2019-02-21 16:36:17 -0800 | [diff] [blame] | 985 | struct xfs_bmbt_irec cmap; |
Darrick J. Wong | affe250 | 2019-02-21 16:26:35 -0800 | [diff] [blame] | 986 | bool directio = (flags & IOMAP_DIRECT); |
Christoph Hellwig | 78f0cc9 | 2019-02-18 09:38:46 -0800 | [diff] [blame] | 987 | |
Dave Chinner | dfa03a5 | 2018-05-02 12:54:54 -0700 | [diff] [blame] | 988 | /* if zeroing doesn't need COW allocation, then we are done. */ |
| 989 | if ((flags & IOMAP_ZERO) && |
| 990 | !needs_cow_for_zeroing(&imap, nimaps)) |
| 991 | goto out_found; |
| 992 | |
Christoph Hellwig | 78f0cc9 | 2019-02-18 09:38:46 -0800 | [diff] [blame] | 993 | /* may drop and re-acquire the ilock */ |
Darrick J. Wong | 4f29e10 | 2019-02-21 16:36:17 -0800 | [diff] [blame] | 994 | cmap = imap; |
| 995 | error = xfs_reflink_allocate_cow(ip, &cmap, &shared, &lockmode, |
Darrick J. Wong | affe250 | 2019-02-21 16:26:35 -0800 | [diff] [blame] | 996 | directio); |
Christoph Hellwig | 78f0cc9 | 2019-02-18 09:38:46 -0800 | [diff] [blame] | 997 | if (error) |
| 998 | goto out_unlock; |
| 999 | |
| 1000 | /* |
| 1001 | * For buffered writes we need to report the address of the |
| 1002 | * previous block (if there was any) so that the higher level |
Darrick J. Wong | affe250 | 2019-02-21 16:26:35 -0800 | [diff] [blame] | 1003 | * write code can perform read-modify-write operations; we |
| 1004 | * won't need the CoW fork mapping until writeback. For direct |
| 1005 | * I/O, which must be block aligned, we need to report the |
Darrick J. Wong | 4f29e10 | 2019-02-21 16:36:17 -0800 | [diff] [blame] | 1006 | * newly allocated address. If the data fork has a hole, copy |
| 1007 | * the COW fork mapping to avoid allocating to the data fork. |
Christoph Hellwig | 78f0cc9 | 2019-02-18 09:38:46 -0800 | [diff] [blame] | 1008 | */ |
Darrick J. Wong | 4f29e10 | 2019-02-21 16:36:17 -0800 | [diff] [blame] | 1009 | if (directio || imap.br_startblock == HOLESTARTBLOCK) |
| 1010 | imap = cmap; |
Christoph Hellwig | 3ba020b | 2016-10-20 15:53:50 +1100 | [diff] [blame] | 1011 | |
| 1012 | end_fsb = imap.br_startoff + imap.br_blockcount; |
| 1013 | length = XFS_FSB_TO_B(mp, end_fsb) - offset; |
Christoph Hellwig | 68a9f5e | 2016-06-21 09:53:44 +1000 | [diff] [blame] | 1014 | } |
| 1015 | |
Dave Chinner | d064178 | 2018-05-02 12:54:53 -0700 | [diff] [blame] | 1016 | /* Don't need to allocate over holes when doing zeroing operations. */ |
| 1017 | if (flags & IOMAP_ZERO) |
| 1018 | goto out_found; |
Christoph Hellwig | 68a9f5e | 2016-06-21 09:53:44 +1000 | [diff] [blame] | 1019 | |
Dave Chinner | d064178 | 2018-05-02 12:54:53 -0700 | [diff] [blame] | 1020 | if (!imap_needs_alloc(inode, &imap, nimaps)) |
| 1021 | goto out_found; |
Christoph Hellwig | b95a212 | 2016-08-17 08:44:52 +1000 | [diff] [blame] | 1022 | |
	/* If nowait is set, bail out, since we are about to make allocations. */
| 1024 | if (flags & IOMAP_NOWAIT) { |
| 1025 | error = -EAGAIN; |
| 1026 | goto out_unlock; |
Christoph Hellwig | 68a9f5e | 2016-06-21 09:53:44 +1000 | [diff] [blame] | 1027 | } |
| 1028 | |
	/*
	 * We cap the maximum length we map to a sane size to keep the
	 * chunks of work done here somewhat symmetric with the work
	 * writeback does. This is a completely arbitrary number pulled
	 * out of thin air as a best guess for initial testing.
	 *
	 * Note that the value needs to be less than 32 bits wide until
	 * the lower level functions are updated.
	 */
| 1038 | length = min_t(loff_t, length, 1024 * PAGE_SIZE); |
| 1039 | |
| 1040 | /* |
| 1041 | * xfs_iomap_write_direct() expects the shared lock. It is unlocked on |
| 1042 | * return. |
| 1043 | */ |
| 1044 | if (lockmode == XFS_ILOCK_EXCL) |
| 1045 | xfs_ilock_demote(ip, lockmode); |
| 1046 | error = xfs_iomap_write_direct(ip, offset, length, &imap, |
| 1047 | nimaps); |
| 1048 | if (error) |
| 1049 | return error; |
| 1050 | |
Christoph Hellwig | c03cea4 | 2018-06-19 15:10:58 -0700 | [diff] [blame] | 1051 | iomap->flags |= IOMAP_F_NEW; |
Christoph Hellwig | be225fe | 2019-02-15 08:02:46 -0800 | [diff] [blame] | 1052 | trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap); |
Dave Chinner | d064178 | 2018-05-02 12:54:53 -0700 | [diff] [blame] | 1053 | |
| 1054 | out_finish: |
Christoph Hellwig | 16be143 | 2019-02-18 09:38:46 -0800 | [diff] [blame] | 1055 | return xfs_bmbt_to_iomap(ip, iomap, &imap, shared); |
Dave Chinner | d064178 | 2018-05-02 12:54:53 -0700 | [diff] [blame] | 1056 | |
| 1057 | out_found: |
| 1058 | ASSERT(nimaps); |
| 1059 | xfs_iunlock(ip, lockmode); |
Christoph Hellwig | be225fe | 2019-02-15 08:02:46 -0800 | [diff] [blame] | 1060 | trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap); |
Dave Chinner | d064178 | 2018-05-02 12:54:53 -0700 | [diff] [blame] | 1061 | goto out_finish; |
| 1062 | |
Christoph Hellwig | 3ba020b | 2016-10-20 15:53:50 +1100 | [diff] [blame] | 1063 | out_unlock: |
| 1064 | xfs_iunlock(ip, lockmode); |
| 1065 | return error; |
Christoph Hellwig | 68a9f5e | 2016-06-21 09:53:44 +1000 | [diff] [blame] | 1066 | } |
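/*
 * How ->iomap_begin()/->iomap_end() are consumed: the generic iomap
 * layer (fs/iomap.c in this era) asks the filesystem for one mapping
 * at a time, lets an actor operate on it, then gives the filesystem a
 * chance to clean up.  A simplified, self-contained model of that
 * driver loop; every name here is a toy stand-in, not kernel API.
 */
struct toy_iomap {
	long long	offset;
	long long	length;
};

struct toy_iomap_ops {
	int (*begin)(long long pos, long long len, struct toy_iomap *im);
	int (*end)(long long pos, long long len, long long written,
		   struct toy_iomap *im);
};

/* returns bytes handled for one mapping, 0 or negative to stop */
typedef long long (*toy_actor)(long long pos, long long len,
			       struct toy_iomap *im);

static long long
toy_iomap_apply(long long pos, long long length,
		const struct toy_iomap_ops *ops, toy_actor actor)
{
	long long done = 0;

	while (length > 0) {
		struct toy_iomap im;
		long long written;

		if (ops->begin(pos, length, &im))
			break;
		written = actor(pos, im.length, &im);
		if (ops->end && ops->end(pos, im.length, written, &im))
			break;
		if (written <= 0)
			break;
		pos += written;
		length -= written;
		done += written;
	}
	return done;
}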
| 1067 | |
| 1068 | static int |
| 1069 | xfs_file_iomap_end_delalloc( |
| 1070 | struct xfs_inode *ip, |
| 1071 | loff_t offset, |
| 1072 | loff_t length, |
Brian Foster | f65e6fa | 2017-03-08 09:58:08 -0800 | [diff] [blame] | 1073 | ssize_t written, |
| 1074 | struct iomap *iomap) |
Christoph Hellwig | 68a9f5e | 2016-06-21 09:53:44 +1000 | [diff] [blame] | 1075 | { |
| 1076 | struct xfs_mount *mp = ip->i_mount; |
| 1077 | xfs_fileoff_t start_fsb; |
| 1078 | xfs_fileoff_t end_fsb; |
| 1079 | int error = 0; |
| 1080 | |
	/*
	 * If the DROP_WRITES error tag is enabled, behave as if the
	 * write failed. Set the NEW flag to force delalloc cleanup.
	 */
Darrick J. Wong | f8c4725 | 2017-06-20 17:54:48 -0700 | [diff] [blame] | 1085 | if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DROP_WRITES)) { |
Brian Foster | f65e6fa | 2017-03-08 09:58:08 -0800 | [diff] [blame] | 1086 | iomap->flags |= IOMAP_F_NEW; |
Brian Foster | 9dbddd7 | 2017-02-13 22:48:17 -0800 | [diff] [blame] | 1087 | written = 0; |
Brian Foster | f65e6fa | 2017-03-08 09:58:08 -0800 | [diff] [blame] | 1088 | } |
Brian Foster | 9dbddd7 | 2017-02-13 22:48:17 -0800 | [diff] [blame] | 1089 | |
Brian Foster | fa7f138 | 2017-02-16 17:19:12 -0800 | [diff] [blame] | 1090 | /* |
| 1091 | * start_fsb refers to the first unused block after a short write. If |
| 1092 | * nothing was written, round offset down to point at the first block in |
| 1093 | * the range. |
| 1094 | */ |
| 1095 | if (unlikely(!written)) |
| 1096 | start_fsb = XFS_B_TO_FSBT(mp, offset); |
| 1097 | else |
| 1098 | start_fsb = XFS_B_TO_FSB(mp, offset + written); |
Christoph Hellwig | 68a9f5e | 2016-06-21 09:53:44 +1000 | [diff] [blame] | 1099 | end_fsb = XFS_B_TO_FSB(mp, offset + length); |
| 1100 | |
| 1101 | /* |
Brian Foster | f65e6fa | 2017-03-08 09:58:08 -0800 | [diff] [blame] | 1102 | * Trim delalloc blocks if they were allocated by this write and we |
| 1103 | * didn't manage to write the whole range. |
Christoph Hellwig | 68a9f5e | 2016-06-21 09:53:44 +1000 | [diff] [blame] | 1104 | * |
| 1105 | * We don't need to care about racing delalloc as we hold i_mutex |
| 1106 | * across the reserve/allocate/unreserve calls. If there are delalloc |
| 1107 | * blocks in the range, they are ours. |
| 1108 | */ |
Brian Foster | f65e6fa | 2017-03-08 09:58:08 -0800 | [diff] [blame] | 1109 | if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) { |
Brian Foster | fa7f138 | 2017-02-16 17:19:12 -0800 | [diff] [blame] | 1110 | truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb), |
| 1111 | XFS_FSB_TO_B(mp, end_fsb) - 1); |
| 1112 | |
Christoph Hellwig | 68a9f5e | 2016-06-21 09:53:44 +1000 | [diff] [blame] | 1113 | error = xfs_bmap_punch_delalloc_range(ip, start_fsb, |
| 1114 | end_fsb - start_fsb); |
Christoph Hellwig | 68a9f5e | 2016-06-21 09:53:44 +1000 | [diff] [blame] | 1115 | if (error && !XFS_FORCED_SHUTDOWN(mp)) { |
| 1116 | xfs_alert(mp, "%s: unable to clean up ino %lld", |
| 1117 | __func__, ip->i_ino); |
| 1118 | return error; |
| 1119 | } |
| 1120 | } |
| 1121 | |
| 1122 | return 0; |
| 1123 | } |
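/*
 * Sketch of the punch-range arithmetic above, in userspace: after a
 * short write, everything from the first unused block through the end
 * of the reservation is stale delalloc and gets punched out; a wholly
 * failed write rounds the start down to cover the first block too.
 * The helpers stand in for XFS_B_TO_FSBT()/XFS_B_TO_FSB(), assuming
 * 4k blocks.
 */
#include <stdint.h>

#define MODEL_BLKSZ 4096u

static uint64_t model_b_to_fsbt(uint64_t b)	/* round down */
{
	return b / MODEL_BLKSZ;
}

static uint64_t model_b_to_fsb(uint64_t b)	/* round up */
{
	return (b + MODEL_BLKSZ - 1) / MODEL_BLKSZ;
}

static void
model_punch_range(uint64_t offset, uint64_t length, uint64_t written,
		  uint64_t *start_fsb, uint64_t *end_fsb)
{
	*start_fsb = written ? model_b_to_fsb(offset + written)
			     : model_b_to_fsbt(offset);
	*end_fsb = model_b_to_fsb(offset + length);
	/* the caller punches [*start_fsb, *end_fsb) only if non-empty */
}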
| 1124 | |
| 1125 | static int |
| 1126 | xfs_file_iomap_end( |
| 1127 | struct inode *inode, |
| 1128 | loff_t offset, |
| 1129 | loff_t length, |
| 1130 | ssize_t written, |
| 1131 | unsigned flags, |
| 1132 | struct iomap *iomap) |
| 1133 | { |
| 1134 | if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC) |
| 1135 | return xfs_file_iomap_end_delalloc(XFS_I(inode), offset, |
Brian Foster | f65e6fa | 2017-03-08 09:58:08 -0800 | [diff] [blame] | 1136 | length, written, iomap); |
Christoph Hellwig | 68a9f5e | 2016-06-21 09:53:44 +1000 | [diff] [blame] | 1137 | return 0; |
| 1138 | } |
| 1139 | |
Christoph Hellwig | 8ff6daa | 2017-01-27 23:20:26 -0800 | [diff] [blame] | 1140 | const struct iomap_ops xfs_iomap_ops = { |
Christoph Hellwig | 68a9f5e | 2016-06-21 09:53:44 +1000 | [diff] [blame] | 1141 | .iomap_begin = xfs_file_iomap_begin, |
| 1142 | .iomap_end = xfs_file_iomap_end, |
| 1143 | }; |
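/*
 * Usage sketch tying into the toy_iomap_apply() model above: a
 * filesystem exposes one ops table per I/O path and the generic
 * helpers are handed the table matching the operation.  toy_begin()
 * caps each mapping much like the iomap_begin above caps direct-I/O
 * allocations (assuming 4k pages); everything here is toy code, not
 * kernel API.  A caller would run, e.g.,
 * toy_iomap_apply(0, nbytes, &toy_data_ops, my_actor).
 */
static int
toy_begin(long long pos, long long len, struct toy_iomap *im)
{
	const long long cap = 1024LL * 4096;	/* mirror the 1024-page cap */

	im->offset = pos;
	im->length = len < cap ? len : cap;
	return 0;
}

static const struct toy_iomap_ops toy_data_ops = {
	.begin	= toy_begin,
	/* .end may be NULL when there is nothing to undo */
};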
Christoph Hellwig | 1d4795e | 2016-08-17 08:45:30 +1000 | [diff] [blame] | 1144 | |
| 1145 | static int |
Christoph Hellwig | 60271ab7 | 2019-02-18 09:38:46 -0800 | [diff] [blame] | 1146 | xfs_seek_iomap_begin( |
| 1147 | struct inode *inode, |
| 1148 | loff_t offset, |
| 1149 | loff_t length, |
| 1150 | unsigned flags, |
| 1151 | struct iomap *iomap) |
| 1152 | { |
| 1153 | struct xfs_inode *ip = XFS_I(inode); |
| 1154 | struct xfs_mount *mp = ip->i_mount; |
| 1155 | xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); |
| 1156 | xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length); |
| 1157 | xfs_fileoff_t cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF; |
| 1158 | struct xfs_iext_cursor icur; |
| 1159 | struct xfs_bmbt_irec imap, cmap; |
| 1160 | int error = 0; |
| 1161 | unsigned lockmode; |
| 1162 | |
| 1163 | if (XFS_FORCED_SHUTDOWN(mp)) |
| 1164 | return -EIO; |
| 1165 | |
| 1166 | lockmode = xfs_ilock_data_map_shared(ip); |
| 1167 | if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) { |
| 1168 | error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK); |
| 1169 | if (error) |
| 1170 | goto out_unlock; |
| 1171 | } |
| 1172 | |
| 1173 | if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) { |
| 1174 | /* |
| 1175 | * If we found a data extent we are done. |
| 1176 | */ |
| 1177 | if (imap.br_startoff <= offset_fsb) |
| 1178 | goto done; |
| 1179 | data_fsb = imap.br_startoff; |
| 1180 | } else { |
| 1181 | /* |
| 1182 | * Fake a hole until the end of the file. |
| 1183 | */ |
| 1184 | data_fsb = min(XFS_B_TO_FSB(mp, offset + length), |
| 1185 | XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes)); |
| 1186 | } |
| 1187 | |
	/*
	 * If a COW fork extent covers the hole, report it, capped at
	 * the next data fork extent.
	 */
| 1192 | if (xfs_inode_has_cow_data(ip) && |
| 1193 | xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap)) |
| 1194 | cow_fsb = cmap.br_startoff; |
| 1195 | if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) { |
| 1196 | if (data_fsb < cow_fsb + cmap.br_blockcount) |
| 1197 | end_fsb = min(end_fsb, data_fsb); |
| 1198 | xfs_trim_extent(&cmap, offset_fsb, end_fsb); |
| 1199 | error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true); |
		/*
		 * This is a COW extent, so we must probe the page
		 * cache because there could be dirty pages backed
		 * by this extent.
		 */
| 1205 | iomap->type = IOMAP_UNWRITTEN; |
| 1206 | goto out_unlock; |
| 1207 | } |
| 1208 | |
| 1209 | /* |
| 1210 | * Else report a hole, capped to the next found data or COW extent. |
| 1211 | */ |
| 1212 | if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb) |
| 1213 | imap.br_blockcount = cow_fsb - offset_fsb; |
| 1214 | else |
| 1215 | imap.br_blockcount = data_fsb - offset_fsb; |
| 1216 | imap.br_startoff = offset_fsb; |
| 1217 | imap.br_startblock = HOLESTARTBLOCK; |
| 1218 | imap.br_state = XFS_EXT_NORM; |
| 1219 | done: |
| 1220 | xfs_trim_extent(&imap, offset_fsb, end_fsb); |
| 1221 | error = xfs_bmbt_to_iomap(ip, iomap, &imap, false); |
| 1222 | out_unlock: |
| 1223 | xfs_iunlock(ip, lockmode); |
| 1224 | return error; |
| 1225 | } |
| 1226 | |
| 1227 | const struct iomap_ops xfs_seek_iomap_ops = { |
| 1228 | .iomap_begin = xfs_seek_iomap_begin, |
| 1229 | }; |
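/*
 * Simplified model of the precedence logic in xfs_seek_iomap_begin():
 * a data extent covering the offset wins outright; a COW fork extent
 * covering it is reported as unwritten so the caller knows to probe
 * the page cache for dirty data; anything else is a hole up to the
 * next extent.  MODEL_NULLOFF stands in for NULLFILEOFF; the "covers"
 * test is reduced to a start-offset comparison for brevity.
 */
#include <stdint.h>

#define MODEL_NULLOFF UINT64_MAX

enum model_seek { MODEL_SEEK_DATA, MODEL_SEEK_UNWRITTEN, MODEL_SEEK_HOLE };

static enum model_seek
model_seek_classify(uint64_t off, uint64_t data_start, uint64_t cow_start)
{
	if (data_start != MODEL_NULLOFF && data_start <= off)
		return MODEL_SEEK_DATA;		/* real data extent found */
	if (cow_start != MODEL_NULLOFF && cow_start <= off)
		return MODEL_SEEK_UNWRITTEN;	/* COW extent: check cache */
	return MODEL_SEEK_HOLE;			/* hole up to next extent */
}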
| 1230 | |
| 1231 | static int |
Christoph Hellwig | 1d4795e | 2016-08-17 08:45:30 +1000 | [diff] [blame] | 1232 | xfs_xattr_iomap_begin( |
| 1233 | struct inode *inode, |
| 1234 | loff_t offset, |
| 1235 | loff_t length, |
| 1236 | unsigned flags, |
| 1237 | struct iomap *iomap) |
| 1238 | { |
| 1239 | struct xfs_inode *ip = XFS_I(inode); |
| 1240 | struct xfs_mount *mp = ip->i_mount; |
| 1241 | xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); |
| 1242 | xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length); |
| 1243 | struct xfs_bmbt_irec imap; |
| 1244 | int nimaps = 1, error = 0; |
| 1245 | unsigned lockmode; |
| 1246 | |
| 1247 | if (XFS_FORCED_SHUTDOWN(mp)) |
| 1248 | return -EIO; |
| 1249 | |
Darrick J. Wong | 84358536d | 2017-04-06 16:00:39 -0700 | [diff] [blame] | 1250 | lockmode = xfs_ilock_attr_map_shared(ip); |
Christoph Hellwig | 1d4795e | 2016-08-17 08:45:30 +1000 | [diff] [blame] | 1251 | |
	/* if there is no attribute fork, or it has no extents, return ENOENT */
Darrick J. Wong | 84358536d | 2017-04-06 16:00:39 -0700 | [diff] [blame] | 1253 | if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) { |
Christoph Hellwig | 1d4795e | 2016-08-17 08:45:30 +1000 | [diff] [blame] | 1254 | error = -ENOENT; |
| 1255 | goto out_unlock; |
| 1256 | } |
| 1257 | |
| 1258 | ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL); |
| 1259 | error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, |
Darrick J. Wong | b7e0b6f | 2017-12-06 16:13:35 -0800 | [diff] [blame] | 1260 | &nimaps, XFS_BMAPI_ATTRFORK); |
Christoph Hellwig | 1d4795e | 2016-08-17 08:45:30 +1000 | [diff] [blame] | 1261 | out_unlock: |
| 1262 | xfs_iunlock(ip, lockmode); |
| 1263 | |
Christoph Hellwig | 16be143 | 2019-02-18 09:38:46 -0800 | [diff] [blame] | 1264 | if (error) |
| 1265 | return error; |
| 1266 | ASSERT(nimaps); |
| 1267 | return xfs_bmbt_to_iomap(ip, iomap, &imap, false); |
Christoph Hellwig | 1d4795e | 2016-08-17 08:45:30 +1000 | [diff] [blame] | 1268 | } |
| 1269 | |
Christoph Hellwig | 8ff6daa | 2017-01-27 23:20:26 -0800 | [diff] [blame] | 1270 | const struct iomap_ops xfs_xattr_iomap_ops = { |
Christoph Hellwig | 1d4795e | 2016-08-17 08:45:30 +1000 | [diff] [blame] | 1271 | .iomap_begin = xfs_xattr_iomap_begin, |
| 1272 | }; |
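/*
 * Usage sketch: callers pick the ops table that matches the request,
 * e.g. an extent walk of the attribute fork uses the xattr table
 * while regular file I/O uses the data-fork table.  Toy model only,
 * reusing the types and tables from the earlier sketches; the real
 * dispatch lives in the VFS-facing XFS code.
 */
static const struct toy_iomap_ops toy_xattr_ops = {
	.begin	= toy_begin,	/* would map the attr fork instead */
};

static const struct toy_iomap_ops *
toy_pick_ops(int want_xattr)
{
	return want_xattr ? &toy_xattr_ops : &toy_data_ops;
}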