// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_reflink.h"

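/*
 * XFS-private writeback context: wraps the generic iomap writeback context
 * with the data and COW fork sequence numbers that were sampled when the
 * cached mapping in ->ctx.iomap was established, so that xfs_imap_valid()
 * can cheaply revalidate the cached mapping for each subsequent block.
 */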
struct xfs_writepage_ctx {
	struct iomap_writepage_ctx ctx;
	unsigned int		data_seq;
	unsigned int		cow_seq;
};

static inline struct xfs_writepage_ctx *
XFS_WPC(struct iomap_writepage_ctx *ctx)
{
	return container_of(ctx, struct xfs_writepage_ctx, ctx);
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_disk_size;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	xfs_fsize_t		isize;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_disk_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_ioend(
	struct iomap_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	unsigned int		nofs_flag;
	int			error;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim. To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (xfs_is_shutdown(mp)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up all COW blocks and underlying data fork delalloc blocks on
	 * I/O error. The delalloc punch is required because this ioend was
	 * mapped to blocks in the COW fork and the associated pages are no
	 * longer dirty. If we don't remove delalloc blocks here, they become
	 * stale and can corrupt free space accounting on unmount.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		if (ioend->io_flags & IOMAP_F_SHARED) {
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			xfs_bmap_punch_delalloc_range(ip,
					XFS_B_TO_FSBT(mp, offset),
					XFS_B_TO_FSB(mp, size));
		}
		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	if (ioend->io_flags & IOMAP_F_SHARED)
		error = xfs_reflink_end_cow(ip, offset, size);
	else if (ioend->io_type == IOMAP_UNWRITTEN)
		error = xfs_iomap_write_unwritten(ip, offset, size, false);

	if (!error && xfs_ioend_is_append(ioend))
		error = xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
done:
	iomap_finish_ioends(ioend, error);
	memalloc_nofs_restore(nofs_flag);
}

/*
 * Finish all pending IO completions that require transactional modifications.
 *
 * We try to merge physically and logically contiguous ioends before completion
 * to minimise the number of transactions we need to perform during IO
 * completion. Both unwritten extent conversion and COW remapping need to
 * iterate and modify one physical extent at a time, so we gain nothing by
 * merging physically discontiguous extents here.
 *
 * The ioend chain we can be processing here is largely unbounded in length,
 * and we may have to perform significant amounts of work on each ioend to
 * complete it. Hence we have to be careful about holding the CPU for too
 * long in this loop.
 */
void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_inode	*ip =
		container_of(work, struct xfs_inode, i_ioend_work);
	struct iomap_ioend	*ioend;
	struct list_head	tmp;
	unsigned long		flags;

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	list_replace_init(&ip->i_ioend_list, &tmp);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);

	iomap_sort_ioends(&tmp);
	while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
			io_list))) {
		list_del_init(&ioend->io_list);
		iomap_ioend_try_merge(ioend, &tmp);
		xfs_end_ioend(ioend);
		cond_resched();
	}
}

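/*
 * Bio completion handler. This can run in interrupt context (hence the
 * irqsave locking), so it must not do transactional work directly; instead
 * it queues the ioend on the inode's pending-completion list and kicks the
 * per-mount unwritten workqueue, which runs xfs_end_io() in process context
 * to do the real completion processing.
 */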
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct iomap_ioend	*ioend = bio->bi_private;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	unsigned long		flags;

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	if (list_empty(&ip->i_ioend_list))
		WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
					 &ip->i_ioend_work));
	list_add_tail(&ioend->io_list, &ip->i_ioend_list);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
}

/*
 * Fast revalidation of the cached writeback mapping. Return true if the
 * current mapping is valid, false otherwise.
 */
static bool
xfs_imap_valid(
	struct iomap_writepage_ctx *wpc,
	struct xfs_inode	*ip,
	loff_t			offset)
{
	if (offset < wpc->iomap.offset ||
	    offset >= wpc->iomap.offset + wpc->iomap.length)
		return false;
	/*
	 * If this is a COW mapping, it is sufficient to check that the mapping
	 * covers the offset. Be careful to check this first because the caller
	 * can revalidate a COW mapping without updating the data seqno.
	 */
	if (wpc->iomap.flags & IOMAP_F_SHARED)
		return true;

	/*
	 * This is not a COW mapping. Check the sequence number of the data
	 * fork because concurrent changes could have invalidated the extent.
	 * Check the COW fork because concurrent changes since the last time
	 * we checked (and found nothing at this offset) could have added
	 * overlapping blocks.
	 */
	if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq))
		return false;
	if (xfs_inode_has_cow_data(ip) &&
	    XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
		return false;
	return true;
}

/*
 * Pass in a delalloc extent and convert it to real extents; return the real
 * extent that maps offset_fsb in wpc->iomap.
 *
 * The current page is held locked so nothing could have removed the block
 * backing offset_fsb, although it could have moved from the COW to the data
 * fork by another thread.
 */
static int
xfs_convert_blocks(
	struct iomap_writepage_ctx *wpc,
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset)
{
	int			error;
	unsigned		*seq;

	if (whichfork == XFS_COW_FORK)
		seq = &XFS_WPC(wpc)->cow_seq;
	else
		seq = &XFS_WPC(wpc)->data_seq;

	/*
	 * Attempt to allocate whatever delalloc extent currently backs offset
	 * and put the result into wpc->iomap. Allocate in a loop because it
	 * may take several attempts to allocate real blocks for a contiguous
	 * delalloc extent if free space is sufficiently fragmented.
	 */
	do {
		error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
				&wpc->iomap, seq);
		if (error)
			return error;
	} while (wpc->iomap.offset + wpc->iomap.length <= offset);

	return 0;
}

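/*
 * Look up (or allocate) the block mapping for the range being written back
 * and cache it in wpc->iomap. Called by the generic iomap writeback code for
 * each block being written; the cached mapping is reused for as long as
 * xfs_imap_valid() says it is still good, so most calls return early without
 * taking the ILOCK.
 */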
static int
xfs_map_blocks(
	struct iomap_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_fileoff_t		cow_fsb;
	int			whichfork;
	struct xfs_bmbt_irec	imap;
	struct xfs_iext_cursor	icur;
	int			retries = 0;
	int			error = 0;

	if (xfs_is_shutdown(mp))
		return -EIO;

	/*
	 * COW fork blocks can overlap data fork blocks even if the blocks
	 * aren't shared. COW I/O always takes precedence, so we must always
	 * check for overlap on reflink inodes unless the mapping is already a
	 * COW one, or the COW fork hasn't changed from the last time we looked
	 * at it.
	 *
	 * It's safe to check the COW fork if_seq here without the ILOCK because
	 * we've indirectly protected against concurrent updates: writeback has
	 * the page locked, which prevents concurrent invalidations by reflink
	 * and directio and prevents concurrent buffered writes to the same
	 * page. Changes to if_seq always happen under i_lock, which protects
	 * against concurrent updates and provides a memory barrier on the way
	 * out that ensures that we always see the current value.
	 */
	if (xfs_imap_valid(wpc, ip, offset))
		return 0;

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset. This will convert delayed allocations (including COW ones)
	 * into real extents. If we return without a valid map, it means we
	 * landed in a hole and we skip the block.
	 */
retry:
	cow_fsb = NULLFILEOFF;
	whichfork = XFS_DATA_FORK;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(!xfs_need_iread_extents(&ip->i_df));

	/*
	 * Check if this offset is covered by a COW extent, and if so use it
	 * directly instead of looking up anything in the data fork.
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
		cow_fsb = imap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		whichfork = XFS_COW_FORK;
		goto allocate_blocks;
	}

	/*
	 * No COW extent overlap. Revalidate now that we may have updated
	 * ->cow_seq. If the data mapping is still valid, we're done.
	 */
	if (xfs_imap_valid(wpc, ip, offset)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return 0;
	}

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset. This will convert delayed allocations (including COW ones)
	 * into real extents.
	 */
	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
	XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/* landed in a hole or beyond EOF? */
	if (imap.br_startoff > offset_fsb) {
		imap.br_blockcount = imap.br_startoff - offset_fsb;
		imap.br_startoff = offset_fsb;
		imap.br_startblock = HOLESTARTBLOCK;
		imap.br_state = XFS_EXT_NORM;
	}

	/*
	 * Truncate to the next COW extent if there is one. This is the only
	 * opportunity to do this because we can skip COW fork lookups for the
	 * subsequent blocks in the mapping; however, the requirement to treat
	 * the COW range separately remains.
	 */
	if (cow_fsb != NULLFILEOFF &&
	    cow_fsb < imap.br_startoff + imap.br_blockcount)
		imap.br_blockcount = cow_fsb - imap.br_startoff;

	/* got a delalloc extent? */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    isnullstartblock(imap.br_startblock))
		goto allocate_blocks;

	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0, 0);
	trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
	return 0;
allocate_blocks:
	error = xfs_convert_blocks(wpc, ip, whichfork, offset);
	if (error) {
		/*
		 * If we failed to find the extent in the COW fork we might have
		 * raced with a COW to data fork conversion or truncate.
		 * Restart the lookup to catch the extent in the data fork for
		 * the former case, but prevent additional retries to avoid
		 * looping forever for the latter case.
		 */
		if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
			goto retry;
		ASSERT(error != -EAGAIN);
		return error;
	}

	/*
	 * Due to merging the resulting real extent might be larger than the
	 * original delalloc one. Trim the returned extent to the next COW
	 * boundary again to force a re-lookup.
	 */
	if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
		loff_t		cow_offset = XFS_FSB_TO_B(mp, cow_fsb);

		if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
			wpc->iomap.length = cow_offset - wpc->iomap.offset;
	}

	ASSERT(wpc->iomap.offset <= offset);
	ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
	trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
	return 0;
}

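/*
 * Called by the generic iomap code just before an ioend's bio is submitted.
 * Any ioend whose completion may need a transaction (on-disk size update,
 * unwritten extent conversion, or COW remapping) gets xfs_end_bio() as its
 * bio end_io handler so that completion is deferred to process context;
 * everything else completes directly in the generic iomap path.
 */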
static int
xfs_prepare_ioend(
	struct iomap_ioend	*ioend,
	int			status)
{
	unsigned int		nofs_flag;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim. To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/* Convert CoW extents to regular */
	if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	memalloc_nofs_restore(nofs_flag);

	/* send ioends that might require a transaction to the completion wq */
	if (xfs_ioend_is_append(ioend) || ioend->io_type == IOMAP_UNWRITTEN ||
	    (ioend->io_flags & IOMAP_F_SHARED))
		ioend->io_bio->bi_end_io = xfs_end_bio;
	return status;
}

/*
 * If the page has delalloc blocks on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip up a later direct I/O read operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the page. Because
 * they are delalloc, we can do this without needing a transaction. Indeed, if
 * we get ENOSPC errors, we have to be able to do this truncation without a
 * transaction as there is no space left for block reservation (typically why
 * we see an ENOSPC in writeback).
 */
static void
xfs_discard_folio(
	struct folio		*folio,
	loff_t			pos)
{
	struct inode		*inode = folio->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			offset = offset_in_folio(folio, pos);
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, pos);
	xfs_fileoff_t		pageoff_fsb = XFS_B_TO_FSBT(mp, offset);
	int			error;

	if (xfs_is_shutdown(mp))
		goto out_invalidate;

	xfs_alert_ratelimited(mp,
		"page discard on page "PTR_FMT", inode 0x%llx, pos %llu.",
			folio, ip->i_ino, pos);

	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
			i_blocks_per_folio(inode, folio) - pageoff_fsb);
	if (error && !xfs_is_shutdown(mp))
		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
	iomap_invalidate_folio(folio, offset, folio_size(folio) - offset);
}

static const struct iomap_writeback_ops xfs_writeback_ops = {
	.map_blocks		= xfs_map_blocks,
	.prepare_ioend		= xfs_prepare_ioend,
	.discard_folio		= xfs_discard_folio,
};

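/*
 * Write back dirty pages for a regular (non-DAX) inode by handing our
 * per-call writeback context and the method table above to the generic
 * iomap writeback code.
 */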
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };

	/*
	 * Writing back data in a transaction context can result in recursive
	 * transactions. This is bad, so issue a warning and get out of here.
	 */
	if (WARN_ON_ONCE(current->journal_info))
		return 0;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}

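/*
 * The DAX analogue of xfs_vm_writepages(): there is no page cache to write
 * back, so this hands off to the generic DAX helper to flush dirty mappings
 * through the inode's dax device.
 */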
STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_inode_buftarg(ip)->bt_daxdev, wbc);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O. We really can't allow
	 * that on reflink inodes, so we have to skip out here. And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return iomap_readpage(page, &xfs_read_iomap_ops);
}

STATIC void
xfs_vm_readahead(
	struct readahead_control *rac)
{
	iomap_readahead(rac, &xfs_read_iomap_ops);
}

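/*
 * Tell the swap code which block device backs this file, then let the
 * generic iomap helper walk the file's block mappings to build the swap
 * extent map.
 */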
static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct	*sis,
	struct file		*swap_file,
	sector_t		*span)
{
	sis->bdev = xfs_inode_buftarg(XFS_I(file_inode(swap_file)))->bt_bdev;
	return iomap_swapfile_activate(sis, swap_file, span,
			&xfs_read_iomap_ops);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readahead		= xfs_vm_readahead,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= __set_page_dirty_nobuffers,
	.releasepage		= iomap_releasepage,
	.invalidatepage		= iomap_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= noop_direct_IO,
	.migratepage		= iomap_migrate_page,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= xfs_iomap_swapfile_activate,
};

const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= __set_page_dirty_no_writeback,
	.invalidatepage		= noop_invalidatepage,
	.swap_activate		= xfs_iomap_swapfile_activate,
};