/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * Structure owned by writepages and passed to the individual writepage calls.
 */
struct xfs_writepage_ctx {
        struct xfs_bmbt_irec    imap;
        bool                    imap_valid;
        unsigned int            io_type;
        struct xfs_ioend        *ioend;
        sector_t                last_block;
};

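/*
 * Walk the buffers attached to @page and report whether any of them are in
 * delalloc or unwritten state.
 */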
void
xfs_count_page_state(
        struct page             *page,
        int                     *delalloc,
        int                     *unwritten)
{
        struct buffer_head      *bh, *head;

        *delalloc = *unwritten = 0;

        bh = head = page_buffers(page);
        do {
                if (buffer_unwritten(bh))
                        (*unwritten) = 1;
                else if (buffer_delay(bh))
                        (*delalloc) = 1;
        } while ((bh = bh->b_this_page) != head);
}

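/*
 * Return the block device backing an inode's data: the realtime device for
 * realtime inodes, the data device for everything else.
 */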
struct block_device *
xfs_find_bdev_for_inode(
        struct inode            *inode)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;

        if (XFS_IS_REALTIME_INODE(ip))
                return mp->m_rtdev_targp->bt_bdev;
        else
                return mp->m_ddev_targp->bt_bdev;
}

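/*
 * Return the DAX device backing an inode's data, chosen the same way as the
 * block device above.
 */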
struct dax_device *
xfs_find_daxdev_for_inode(
        struct inode            *inode)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;

        if (XFS_IS_REALTIME_INODE(ip))
                return mp->m_rtdev_targp->bt_daxdev;
        else
                return mp->m_ddev_targp->bt_daxdev;
}

/*
 * We're now finished for good with this page.  Update the page state via the
 * associated buffer_heads, paying attention to the start and end offsets that
 * we need to process on the page.
 *
 * Note that we open code the action in end_buffer_async_write here so that we
 * only have to iterate over the buffers attached to the page once.  This is
 * not only more efficient, but also ensures that we only call
 * end_page_writeback at the end of the iteration, and thus avoid the pitfall
 * of the page and buffers potentially being freed after every call to
 * end_buffer_async_write.
 */
static void
xfs_finish_page_writeback(
        struct inode            *inode,
        struct bio_vec          *bvec,
        int                     error)
{
        struct buffer_head      *head = page_buffers(bvec->bv_page), *bh = head;
        bool                    busy = false;
        unsigned int            off = 0;
        unsigned long           flags;

        ASSERT(bvec->bv_offset < PAGE_SIZE);
        ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0);
        ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE);
        ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);

        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
        do {
                if (off >= bvec->bv_offset &&
                    off < bvec->bv_offset + bvec->bv_len) {
                        ASSERT(buffer_async_write(bh));
                        ASSERT(bh->b_end_io == NULL);

                        if (error) {
                                mark_buffer_write_io_error(bh);
                                clear_buffer_uptodate(bh);
                                SetPageError(bvec->bv_page);
                        } else {
                                set_buffer_uptodate(bh);
                        }
                        clear_buffer_async_write(bh);
                        unlock_buffer(bh);
                } else if (buffer_async_write(bh)) {
                        ASSERT(buffer_locked(bh));
                        busy = true;
                }
                off += bh->b_size;
        } while ((bh = bh->b_this_page) != head);
        bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
        local_irq_restore(flags);

        if (!busy)
                end_page_writeback(bvec->bv_page);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
        struct xfs_ioend        *ioend,
        int                     error)
{
        struct inode            *inode = ioend->io_inode;
        struct bio              *bio = &ioend->io_inline_bio;
        struct bio              *last = ioend->io_bio, *next;
        u64                     start = bio->bi_iter.bi_sector;
        bool                    quiet = bio_flagged(bio, BIO_QUIET);

        for (bio = &ioend->io_inline_bio; bio; bio = next) {
                struct bio_vec  *bvec;
                int             i;

                /*
                 * For the last bio, bi_private points to the ioend, so we
                 * need to explicitly end the iteration here.
                 */
                if (bio == last)
                        next = NULL;
                else
                        next = bio->bi_private;

                /* walk each page on bio, ending page IO on them */
                bio_for_each_segment_all(bvec, bio, i)
                        xfs_finish_page_writeback(inode, bvec, error);

                bio_put(bio);
        }

        if (unlikely(error && !quiet)) {
                xfs_err_ratelimited(XFS_I(inode)->i_mount,
                        "writeback error on sector %llu", start);
        }
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
        return ioend->io_offset + ioend->io_size >
                XFS_I(ioend->io_inode)->i_d.di_size;
}

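/*
 * Allocate the transaction for an on-disk file size update ahead of I/O
 * submission and attach it to the ioend; both the transaction and the
 * freeze protection it carries are handed off to the completion thread.
 */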
STATIC int
xfs_setfilesize_trans_alloc(
        struct xfs_ioend        *ioend)
{
        struct xfs_mount        *mp = XFS_I(ioend->io_inode)->i_mount;
        struct xfs_trans        *tp;
        int                     error;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
        if (error)
                return error;

        ioend->io_append_trans = tp;

        /*
         * We may pass freeze protection with a transaction.  So tell lockdep
         * we released it.
         */
        __sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
        /*
         * We hand off the transaction to the completion thread now, so
         * clear the flag here.
         */
        current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
        return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
        struct xfs_inode        *ip,
        struct xfs_trans        *tp,
        xfs_off_t               offset,
        size_t                  size)
{
        xfs_fsize_t             isize;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        isize = xfs_new_eof(ip, offset + size);
        if (!isize) {
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                xfs_trans_cancel(tp);
                return 0;
        }

        trace_xfs_setfilesize(ip, offset, size);

        ip->i_d.di_size = isize;
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

        return xfs_trans_commit(tp);
}

int
xfs_setfilesize(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        size_t                  size)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        int                     error;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
        if (error)
                return error;

        return __xfs_setfilesize(ip, tp, offset, size);
}

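/*
 * Finish the on-disk size update for a completed ioend, using the transaction
 * that was allocated at submission time.
 */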
STATIC int
xfs_setfilesize_ioend(
        struct xfs_ioend        *ioend,
        int                     error)
{
        struct xfs_inode        *ip = XFS_I(ioend->io_inode);
        struct xfs_trans        *tp = ioend->io_append_trans;

        /*
         * The transaction may have been allocated in the I/O submission thread,
         * thus we need to mark ourselves as being in a transaction manually.
         * Similarly for freeze protection.
         */
        current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
        __sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

        /* we abort the update if there was an IO error */
        if (error) {
                xfs_trans_cancel(tp);
                return error;
        }

        return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
        struct work_struct *work)
{
        struct xfs_ioend        *ioend =
                container_of(work, struct xfs_ioend, io_work);
        struct xfs_inode        *ip = XFS_I(ioend->io_inode);
        xfs_off_t               offset = ioend->io_offset;
        size_t                  size = ioend->io_size;
        int                     error;

        /*
         * Just clean up the in-memory structures if the fs has been shut down.
         */
        if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                error = -EIO;
                goto done;
        }

        /*
         * Clean up any COW blocks on an I/O error.
         */
        error = blk_status_to_errno(ioend->io_bio->bi_status);
        if (unlikely(error)) {
                switch (ioend->io_type) {
                case XFS_IO_COW:
                        xfs_reflink_cancel_cow_range(ip, offset, size, true);
                        break;
                }

                goto done;
        }

        /*
         * Success: commit the COW or unwritten blocks if needed.
         */
        switch (ioend->io_type) {
        case XFS_IO_COW:
                error = xfs_reflink_end_cow(ip, offset, size);
                break;
        case XFS_IO_UNWRITTEN:
                /* writeback should never update isize */
                error = xfs_iomap_write_unwritten(ip, offset, size, false);
                break;
        default:
                ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
                break;
        }

done:
        if (ioend->io_append_trans)
                error = xfs_setfilesize_ioend(ioend, error);
        xfs_destroy_ioend(ioend, error);
}

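/*
 * Completion handler for the last bio of an ioend.  Punt to a workqueue if
 * completion needs transactional work (unwritten extent conversion, COW
 * remapping, or a file size update); otherwise tear the ioend down directly.
 */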
STATIC void
xfs_end_bio(
        struct bio              *bio)
{
        struct xfs_ioend        *ioend = bio->bi_private;
        struct xfs_mount        *mp = XFS_I(ioend->io_inode)->i_mount;

        if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
                queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
        else if (ioend->io_append_trans)
                queue_work(mp->m_data_workqueue, &ioend->io_work);
        else
                xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

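/*
 * Look up the extent mapping that backs @offset for writeback.  For delalloc
 * regions this also allocates the real blocks now that the data is being
 * written out.
 */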
STATIC int
xfs_map_blocks(
        struct inode            *inode,
        loff_t                  offset,
        struct xfs_bmbt_irec    *imap,
        int                     type)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        ssize_t                 count = i_blocksize(inode);
        xfs_fileoff_t           offset_fsb, end_fsb;
        int                     error = 0;
        int                     bmapi_flags = XFS_BMAPI_ENTIRE;
        int                     nimaps = 1;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        ASSERT(type != XFS_IO_COW);
        if (type == XFS_IO_UNWRITTEN)
                bmapi_flags |= XFS_BMAPI_IGSTATE;

        xfs_ilock(ip, XFS_ILOCK_SHARED);
        ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
               (ip->i_df.if_flags & XFS_IFEXTENTS));
        ASSERT(offset <= mp->m_super->s_maxbytes);

        if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes)
                count = mp->m_super->s_maxbytes - offset;
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
        offset_fsb = XFS_B_TO_FSBT(mp, offset);
        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
                                imap, &nimaps, bmapi_flags);
        /*
         * Truncate an overwrite extent if there's a pending CoW
         * reservation before the end of this extent.  This forces us
         * to come back to writepage to take care of the CoW.
         */
        if (nimaps && type == XFS_IO_OVERWRITE)
                xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb, imap);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        if (error)
                return error;

        if (type == XFS_IO_DELALLOC &&
            (!nimaps || isnullstartblock(imap->br_startblock))) {
                error = xfs_iomap_write_allocate(ip, XFS_DATA_FORK, offset,
                                imap);
                if (!error)
                        trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
                return error;
        }

#ifdef DEBUG
        if (type == XFS_IO_UNWRITTEN) {
                ASSERT(nimaps);
                ASSERT(imap->br_startblock != HOLESTARTBLOCK);
                ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
        }
#endif
        if (nimaps)
                trace_xfs_map_blocks_found(ip, offset, count, type, imap);
        return 0;
}

STATIC bool
xfs_imap_valid(
        struct inode            *inode,
        struct xfs_bmbt_irec    *imap,
        xfs_off_t               offset)
{
        offset >>= inode->i_blkbits;

        /*
         * We have to make sure the cached mapping is within EOF to protect
         * against eofblocks trimming on file release leaving us with a stale
         * mapping.  Otherwise, a page for a subsequent file extending buffered
         * write could get picked up by this writeback cycle and written to the
         * wrong blocks.
         *
         * Note that what we really want here is a generic mapping invalidation
         * mechanism to protect us from arbitrary extent modifying contexts,
         * not just eofblocks.
         */
        xfs_trim_extent_eof(imap, XFS_I(inode));

        return offset >= imap->br_startoff &&
                offset < imap->br_startoff + imap->br_blockcount;
}

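/*
 * Mark a mapped, locked buffer as being under async write I/O.
 */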
STATIC void
xfs_start_buffer_writeback(
        struct buffer_head      *bh)
{
        ASSERT(buffer_mapped(bh));
        ASSERT(buffer_locked(bh));
        ASSERT(!buffer_delay(bh));
        ASSERT(!buffer_unwritten(bh));

        bh->b_end_io = NULL;
        set_buffer_async_write(bh);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
        struct page             *page,
        int                     clear_dirty)
{
        ASSERT(PageLocked(page));
        ASSERT(!PageWriteback(page));

        /*
         * If the page was not fully cleaned, we need to ensure that the
         * higher layers come back to it correctly.  That means we need to
         * keep the page dirty, and for WB_SYNC_ALL writeback we need to
         * ensure the PAGECACHE_TAG_TOWRITE index mark is not removed so
         * another attempt to write this page in this writeback sweep will
         * be made.
         */
        if (clear_dirty) {
                clear_page_dirty_for_io(page);
                set_page_writeback(page);
        } else
                set_page_writeback_keepwrite(page);

        unlock_page(page);
}

static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
        return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit the bio for an ioend.  We are passed an ioend with a bio attached to
 * it, and we submit that bio.  The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the
 * ioend once.  In the case of multiple bio submission, each bio will take an
 * IO reference to the ioend to ensure that the ioend completion is only done
 * once all bios have been submitted and the ioend is really done.
 *
 * If @status is non-zero, it means that we have a situation where some part
 * of the submission process has failed after we have marked pages for
 * writeback and unlocked them.  In this situation, we need to fail the bio
 * and ioend rather than submit it to IO.  This typically only happens on a
 * filesystem shutdown.
 */
STATIC int
xfs_submit_ioend(
        struct writeback_control *wbc,
        struct xfs_ioend        *ioend,
        int                     status)
{
        /* Convert CoW extents to regular */
        if (!status && ioend->io_type == XFS_IO_COW) {
                status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
                                ioend->io_offset, ioend->io_size);
        }

        /* Reserve log space if we might write beyond the on-disk inode size. */
        if (!status &&
            ioend->io_type != XFS_IO_UNWRITTEN &&
            xfs_ioend_is_append(ioend) &&
            !ioend->io_append_trans)
                status = xfs_setfilesize_trans_alloc(ioend);

        ioend->io_bio->bi_private = ioend;
        ioend->io_bio->bi_end_io = xfs_end_bio;
        ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

        /*
         * If we are failing the IO now, just mark the ioend with an
         * error and finish it.  This will run IO completion immediately
         * as there is only one reference to the ioend at this point in
         * time.
         */
        if (status) {
                ioend->io_bio->bi_status = errno_to_blk_status(status);
                bio_endio(ioend->io_bio);
                return status;
        }

        ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
        submit_bio(ioend->io_bio);
        return 0;
}

static void
xfs_init_bio_from_bh(
        struct bio              *bio,
        struct buffer_head      *bh)
{
        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio_set_dev(bio, bh->b_bdev);
}

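/*
 * Allocate an ioend for this writeback.  The ioend is embedded in its first
 * bio (io_inline_bio), which is allocated from xfs_ioend_bioset.
 */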
static struct xfs_ioend *
xfs_alloc_ioend(
        struct inode            *inode,
        unsigned int            type,
        xfs_off_t               offset,
        struct buffer_head      *bh)
{
        struct xfs_ioend        *ioend;
        struct bio              *bio;

        bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, xfs_ioend_bioset);
        xfs_init_bio_from_bh(bio, bh);

        ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
        INIT_LIST_HEAD(&ioend->io_list);
        ioend->io_type = type;
        ioend->io_inode = inode;
        ioend->io_size = 0;
        ioend->io_offset = offset;
        INIT_WORK(&ioend->io_work, xfs_end_io);
        ioend->io_append_trans = NULL;
        ioend->io_bio = bio;
        return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
        struct xfs_ioend        *ioend,
        struct writeback_control *wbc,
        struct buffer_head      *bh)
{
        struct bio *new;

        new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
        xfs_init_bio_from_bh(new, bh);

        bio_chain(ioend->io_bio, new);
        bio_get(ioend->io_bio);         /* for xfs_destroy_ioend */
        ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
        ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
        submit_bio(ioend->io_bio);
        ioend->io_bio = new;
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Finished ioends are added to @iolist so that the caller can submit
 * them once it has finished processing the dirty page.
 */
STATIC void
xfs_add_to_ioend(
        struct inode            *inode,
        struct buffer_head      *bh,
        xfs_off_t               offset,
        struct xfs_writepage_ctx *wpc,
        struct writeback_control *wbc,
        struct list_head        *iolist)
{
        if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
            bh->b_blocknr != wpc->last_block + 1 ||
            offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
                if (wpc->ioend)
                        list_add(&wpc->ioend->io_list, iolist);
                wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
        }

        /*
         * If the buffer doesn't fit into the bio we need to allocate a new
         * one.  This shouldn't happen more than once for a given buffer.
         */
        while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
                xfs_chain_bio(wpc->ioend, wbc, bh);

        wpc->ioend->io_size += bh->b_size;
        wpc->last_block = bh->b_blocknr;
        xfs_start_buffer_writeback(bh);
}

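/*
 * Translate a file offset into the corresponding disk block number using the
 * extent mapping, and mark the buffer mapped.
 */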
STATIC void
xfs_map_buffer(
        struct inode            *inode,
        struct buffer_head      *bh,
        struct xfs_bmbt_irec    *imap,
        xfs_off_t               offset)
{
        sector_t                bn;
        struct xfs_mount        *m = XFS_I(inode)->i_mount;
        xfs_off_t               iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
        xfs_daddr_t             iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

        ASSERT(imap->br_startblock != HOLESTARTBLOCK);
        ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

        bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
              ((offset - iomap_offset) >> inode->i_blkbits);

        ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

        bh->b_blocknr = bn;
        set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
        struct inode            *inode,
        struct buffer_head      *bh,
        struct xfs_bmbt_irec    *imap,
        xfs_off_t               offset)
{
        ASSERT(imap->br_startblock != HOLESTARTBLOCK);
        ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

        xfs_map_buffer(inode, bh, imap, offset);
        set_buffer_mapped(bh);
        clear_buffer_delay(bh);
        clear_buffer_unwritten(bh);
}

/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, we walk all the buffers in the page to
 * try to find one of the type passed in.  If it is not set, we only check
 * the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
        struct page             *page,
        unsigned int            type,
        bool                    check_all_buffers)
{
        struct buffer_head      *bh;
        struct buffer_head      *head;

        if (PageWriteback(page))
                return false;
        if (!page->mapping)
                return false;
        if (!page_has_buffers(page))
                return false;

        bh = head = page_buffers(page);
        do {
                if (buffer_unwritten(bh)) {
                        if (type == XFS_IO_UNWRITTEN)
                                return true;
                } else if (buffer_delay(bh)) {
                        if (type == XFS_IO_DELALLOC)
                                return true;
                } else if (buffer_dirty(bh) && buffer_mapped(bh)) {
                        if (type == XFS_IO_OVERWRITE)
                                return true;
                }

                /* If we are only checking the first buffer, we are done now. */
                if (!check_all_buffers)
                        break;
        } while ((bh = bh->b_this_page) != head);

        return false;
}

STATIC void
xfs_vm_invalidatepage(
        struct page             *page,
        unsigned int            offset,
        unsigned int            length)
{
        trace_xfs_invalidatepage(page->mapping->host, page, offset,
                        length);

        /*
         * If we are invalidating the entire page, clear the dirty state
         * from it so that we can check for attempts to release dirty cached
         * pages in xfs_vm_releasepage().
         */
        if (offset == 0 && length >= PAGE_SIZE)
                cancel_dirty_page(page);
        block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO
 * read is done on that same region - the delalloc extent is returned when
 * none is supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it.  Because they are delalloc, we can do this without needing
 * a transaction.  Indeed - if we get ENOSPC errors, we have to be able to do
 * this truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
        struct page             *page)
{
        struct inode            *inode = page->mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct buffer_head      *bh, *head;
        loff_t                  offset = page_offset(page);

        if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
                goto out_invalidate;

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                goto out_invalidate;

        xfs_alert(ip->i_mount,
                "page discard on page %p, inode 0x%llx, offset %llu.",
                        page, ip->i_ino, offset);

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        bh = head = page_buffers(page);
        do {
                int             error;
                xfs_fileoff_t   start_fsb;

                if (!buffer_delay(bh))
                        goto next_buffer;

                start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
                error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
                if (error) {
                        /* something screwed, just bail */
                        if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                                xfs_alert(ip->i_mount,
                        "page discard unable to remove delalloc mapping.");
                        }
                        break;
                }
next_buffer:
                offset += i_blocksize(inode);

        } while ((bh = bh->b_this_page) != head);

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
        xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
        return;
}

static int
xfs_map_cow(
        struct xfs_writepage_ctx *wpc,
        struct inode            *inode,
        loff_t                  offset,
        unsigned int            *new_type)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_bmbt_irec    imap;
        bool                    is_cow = false;
        int                     error;

        /*
         * If we already have a valid COW mapping keep using it.
         */
        if (wpc->io_type == XFS_IO_COW) {
                wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
                if (wpc->imap_valid) {
                        *new_type = XFS_IO_COW;
                        return 0;
                }
        }

        /*
         * Else we need to check if there is a COW mapping at this offset.
         */
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        if (!is_cow)
                return 0;

        /*
         * And if the COW mapping has a delayed extent here we need to
         * allocate real space for it now.
         */
        if (isnullstartblock(imap.br_startblock)) {
                error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
                                &imap);
                if (error)
                        return error;
        }

        wpc->io_type = *new_type = XFS_IO_COW;
        wpc->imap_valid = true;
        wpc->imap = imap;
        return 0;
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * the forward progress guarantees we need to provide.  The current ioend we
 * are adding buffers to is cached in the writepage context, and if the new
 * buffer does not append to the cached ioend, it will create a new ioend and
 * cache that instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has
 * been detected.  While ioends are submitted immediately after they are
 * completed, batching optimisations are provided by higher level block
 * plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on
 * the writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
        struct xfs_writepage_ctx *wpc,
        struct writeback_control *wbc,
        struct inode            *inode,
        struct page             *page,
        uint64_t                end_offset)
{
        LIST_HEAD(submit_list);
        struct xfs_ioend        *ioend, *next;
        struct buffer_head      *bh, *head;
        ssize_t                 len = i_blocksize(inode);
        uint64_t                offset;
        int                     error = 0;
        int                     count = 0;
        int                     uptodate = 1;
        unsigned int            new_type;

        bh = head = page_buffers(page);
        offset = page_offset(page);
        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;

                /*
                 * set_page_dirty dirties all buffers in a page, independent
                 * of their state.  The dirty state however is entirely
                 * meaningless for holes (!mapped && uptodate), so skip
                 * buffers covering holes here.
                 */
                if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
                        wpc->imap_valid = false;
                        continue;
                }

                if (buffer_unwritten(bh))
                        new_type = XFS_IO_UNWRITTEN;
                else if (buffer_delay(bh))
                        new_type = XFS_IO_DELALLOC;
                else if (buffer_uptodate(bh))
                        new_type = XFS_IO_OVERWRITE;
                else {
                        if (PageUptodate(page))
                                ASSERT(buffer_mapped(bh));
                        /*
                         * This buffer is not uptodate and will not be
                         * written to disk.  Ensure that we will put any
                         * subsequent writeable buffers into a new
                         * ioend.
                         */
                        wpc->imap_valid = false;
                        continue;
                }

                if (xfs_is_reflink_inode(XFS_I(inode))) {
                        error = xfs_map_cow(wpc, inode, offset, &new_type);
                        if (error)
                                goto out;
                }

                if (wpc->io_type != new_type) {
                        wpc->io_type = new_type;
                        wpc->imap_valid = false;
                }

                if (wpc->imap_valid)
                        wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
                                                         offset);
                if (!wpc->imap_valid) {
                        error = xfs_map_blocks(inode, offset, &wpc->imap,
                                               wpc->io_type);
                        if (error)
                                goto out;
                        wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
                                                         offset);
                }
                if (wpc->imap_valid) {
| 972 | lock_buffer(bh); |
| 973 | if (wpc->io_type != XFS_IO_OVERWRITE) |
| 974 | xfs_map_at_offset(inode, bh, &wpc->imap, offset); |
Dave Chinner | bb18782 | 2016-04-06 08:11:25 +1000 | [diff] [blame] | 975 | xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list); |
Dave Chinner | bfce7d2 | 2016-02-15 17:21:37 +1100 | [diff] [blame] | 976 | count++; |
| 977 | } |
| 978 | |
Dave Chinner | bfce7d2 | 2016-02-15 17:21:37 +1100 | [diff] [blame] | 979 | } while (offset += len, ((bh = bh->b_this_page) != head)); |
| 980 | |
| 981 | if (uptodate && bh == head) |
| 982 | SetPageUptodate(page); |
| 983 | |
Dave Chinner | e10de37 | 2016-02-15 17:23:12 +1100 | [diff] [blame] | 984 | ASSERT(wpc->ioend || list_empty(&submit_list)); |
Dave Chinner | bfce7d2 | 2016-02-15 17:21:37 +1100 | [diff] [blame] | 985 | |
Dave Chinner | e10de37 | 2016-02-15 17:23:12 +1100 | [diff] [blame] | 986 | out: |
Dave Chinner | bfce7d2 | 2016-02-15 17:21:37 +1100 | [diff] [blame] | 987 | /* |
Dave Chinner | e10de37 | 2016-02-15 17:23:12 +1100 | [diff] [blame] | 988 | * On error, we have to fail the ioend here because we have locked |
| 989 | * buffers in the ioend. If we don't do this, we'll deadlock |
| 990 | * invalidating the page as that tries to lock the buffers on the page. |
| 991 | * Also, because we may have set pages under writeback, we have to make |
| 992 | * sure we run IO completion to mark the error state of the IO |
| 993 | * appropriately, so we can't cancel the ioend directly here. That means |
| 994 | * we have to mark this page as under writeback if we included any |
| 995 | * buffers from it in the ioend chain so that completion treats it |
| 996 | * correctly. |
Dave Chinner | bfce7d2 | 2016-02-15 17:21:37 +1100 | [diff] [blame] | 997 | * |
Dave Chinner | e10de37 | 2016-02-15 17:23:12 +1100 | [diff] [blame] | 998 | * If we didn't include the page in the ioend, then on error we can
| 999 | * simply discard and unlock it as there are no other users of the page
| 1000 | * or its buffers right now. The caller will still need to trigger
| 1001 | * submission of outstanding ioends on the writepage context so they are |
| 1002 | * treated correctly on error. |
Dave Chinner | bfce7d2 | 2016-02-15 17:21:37 +1100 | [diff] [blame] | 1003 | */ |
Dave Chinner | e10de37 | 2016-02-15 17:23:12 +1100 | [diff] [blame] | 1004 | if (count) { |
| 1005 | xfs_start_page_writeback(page, !error); |
| 1006 | |
| 1007 | /* |
| 1008 | * Preserve the original error if there was one, otherwise catch |
| 1009 | * submission errors here and propagate into subsequent ioend |
| 1010 | * submissions. |
| 1011 | */ |
| 1012 | list_for_each_entry_safe(ioend, next, &submit_list, io_list) { |
| 1013 | int error2; |
| 1014 | |
| 1015 | list_del_init(&ioend->io_list); |
| 1016 | error2 = xfs_submit_ioend(wbc, ioend, error); |
| 1017 | if (error2 && !error) |
| 1018 | error = error2; |
| 1019 | } |
| 1020 | } else if (error) { |
Dave Chinner | bfce7d2 | 2016-02-15 17:21:37 +1100 | [diff] [blame] | 1021 | xfs_aops_discard_page(page); |
| 1022 | ClearPageUptodate(page); |
| 1023 | unlock_page(page); |
Dave Chinner | e10de37 | 2016-02-15 17:23:12 +1100 | [diff] [blame] | 1024 | } else { |
| 1025 | /* |
| 1026 | * We can end up here with no error and nothing to write if we |
| 1027 | * race with a partial page truncate on a sub-page block sized |
| 1028 | * filesystem. In that case we need to mark the page clean. |
| 1029 | */ |
| 1030 | xfs_start_page_writeback(page, 1); |
| 1031 | end_page_writeback(page); |
Dave Chinner | bfce7d2 | 2016-02-15 17:21:37 +1100 | [diff] [blame] | 1032 | } |
Dave Chinner | e10de37 | 2016-02-15 17:23:12 +1100 | [diff] [blame] | 1033 | |
Dave Chinner | bfce7d2 | 2016-02-15 17:21:37 +1100 | [diff] [blame] | 1034 | mapping_set_error(page->mapping, error); |
| 1035 | return error; |
| 1036 | } |
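
/*
 * Illustrative sketch (not part of the original source): the
 * first-error-wins submission idiom used at the end of
 * xfs_writepage_map(), shown standalone. "struct item" and submit_one()
 * are hypothetical stand-ins for struct xfs_ioend and
 * xfs_submit_ioend(); only <linux/list.h> is assumed.
 */
#include <linux/list.h>

struct item {
	struct list_head	io_list;
};

static int submit_one(struct item *it, int error);	/* hypothetical */

static int
submit_all(
	struct list_head	*queue,
	int			error)
{
	struct item		*it, *next;

	list_for_each_entry_safe(it, next, queue, io_list) {
		int		error2;

		list_del_init(&it->io_list);
		/* submit even after a failure so completion sees the error */
		error2 = submit_one(it, error);
		if (error2 && !error)
			error = error2;		/* keep the first error */
	}
	return error;
}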
| 1037 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1038 | /* |
Christoph Hellwig | 89f3b363 | 2010-06-24 09:45:48 +1000 | [diff] [blame] | 1039 | * Write out a dirty page. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1040 | * |
Christoph Hellwig | 89f3b363 | 2010-06-24 09:45:48 +1000 | [diff] [blame] | 1041 | * For delalloc space on the page we need to allocate space and flush it. |
| 1042 | * For unwritten space on the page we need to start the conversion to |
| 1043 | * regular allocated space. |
Christoph Hellwig | 89f3b363 | 2010-06-24 09:45:48 +1000 | [diff] [blame] | 1044 | * For any other dirty buffer heads on the page we should flush them. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1045 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1046 | STATIC int |
Dave Chinner | fbcc025 | 2016-02-15 17:21:19 +1100 | [diff] [blame] | 1047 | xfs_do_writepage( |
Christoph Hellwig | 89f3b363 | 2010-06-24 09:45:48 +1000 | [diff] [blame] | 1048 | struct page *page, |
Dave Chinner | fbcc025 | 2016-02-15 17:21:19 +1100 | [diff] [blame] | 1049 | struct writeback_control *wbc, |
| 1050 | void *data) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1051 | { |
Dave Chinner | fbcc025 | 2016-02-15 17:21:19 +1100 | [diff] [blame] | 1052 | struct xfs_writepage_ctx *wpc = data; |
Christoph Hellwig | 89f3b363 | 2010-06-24 09:45:48 +1000 | [diff] [blame] | 1053 | struct inode *inode = page->mapping->host; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1054 | loff_t offset; |
Darrick J. Wong | c8ce540 | 2017-06-16 11:00:05 -0700 | [diff] [blame] | 1055 | uint64_t end_offset; |
Dave Chinner | ad68972 | 2016-02-15 17:21:31 +1100 | [diff] [blame] | 1056 | pgoff_t end_index; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1057 | |
Lukas Czerner | 34097df | 2013-05-21 23:58:01 -0400 | [diff] [blame] | 1058 | trace_xfs_writepage(inode, page, 0, 0); |
Christoph Hellwig | 89f3b363 | 2010-06-24 09:45:48 +1000 | [diff] [blame] | 1059 | |
Christoph Hellwig | 20cb52e | 2010-06-24 09:46:01 +1000 | [diff] [blame] | 1060 | ASSERT(page_has_buffers(page)); |
| 1061 | |
Christoph Hellwig | 89f3b363 | 2010-06-24 09:45:48 +1000 | [diff] [blame] | 1062 | /* |
| 1063 | * Refuse to write the page out if we are called from reclaim context. |
| 1064 | * |
Christoph Hellwig | d4f7a5c | 2010-06-28 10:34:44 -0400 | [diff] [blame] | 1065 | * This avoids stack overflows when called from deep call stacks in
| 1066 | * random callers doing direct reclaim or memcg reclaim. We explicitly
| 1067 | * allow reclaim from kswapd as the stack usage there is relatively low. |
Christoph Hellwig | 89f3b363 | 2010-06-24 09:45:48 +1000 | [diff] [blame] | 1068 | * |
Mel Gorman | 94054fa | 2011-10-31 17:07:45 -0700 | [diff] [blame] | 1069 | * This should never happen except in the case of a VM regression so |
| 1070 | * warn about it. |
Christoph Hellwig | 89f3b363 | 2010-06-24 09:45:48 +1000 | [diff] [blame] | 1071 | */ |
Mel Gorman | 94054fa | 2011-10-31 17:07:45 -0700 | [diff] [blame] | 1072 | if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == |
| 1073 | PF_MEMALLOC)) |
Christoph Hellwig | b5420f2 | 2010-08-24 11:47:51 +1000 | [diff] [blame] | 1074 | goto redirty; |
Christoph Hellwig | 89f3b363 | 2010-06-24 09:45:48 +1000 | [diff] [blame] | 1075 | |
| 1076 | /* |
Christoph Hellwig | 680a647 | 2011-07-08 14:34:05 +0200 | [diff] [blame] | 1077 | * Given that we do not allow direct reclaim to call us, we should |
| 1078 | * never be called while in a filesystem transaction. |
Christoph Hellwig | 89f3b363 | 2010-06-24 09:45:48 +1000 | [diff] [blame] | 1079 | */ |
Michal Hocko | 9070733 | 2017-05-03 14:53:12 -0700 | [diff] [blame] | 1080 | if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS)) |
Christoph Hellwig | b5420f2 | 2010-08-24 11:47:51 +1000 | [diff] [blame] | 1081 | goto redirty; |
Christoph Hellwig | 89f3b363 | 2010-06-24 09:45:48 +1000 | [diff] [blame] | 1082 | |
Jie Liu | 8695d27 | 2014-05-20 08:24:26 +1000 | [diff] [blame] | 1083 | /* |
Dave Chinner | ad68972 | 2016-02-15 17:21:31 +1100 | [diff] [blame] | 1084 | * Is this page beyond the end of the file? |
| 1085 | * |
Jie Liu | 8695d27 | 2014-05-20 08:24:26 +1000 | [diff] [blame] | 1086 | * If the page index is less than end_index, adjust end_offset
| 1087 | * to the highest offset that this page should represent.
| 1088 | * ----------------------------------------------------- |
| 1089 | * | file mapping | <EOF> | |
| 1090 | * ----------------------------------------------------- |
| 1091 | * | Page ... | Page N-2 | Page N-1 | Page N | | |
| 1092 | * ^--------------------------------^----------|-------- |
| 1093 | * | desired writeback range | see else | |
| 1094 | * ---------------------------------^------------------| |
| 1095 | */ |
Dave Chinner | ad68972 | 2016-02-15 17:21:31 +1100 | [diff] [blame] | 1096 | offset = i_size_read(inode); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1097 | end_index = offset >> PAGE_SHIFT; |
Jie Liu | 8695d27 | 2014-05-20 08:24:26 +1000 | [diff] [blame] | 1098 | if (page->index < end_index) |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1099 | end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT; |
Jie Liu | 8695d27 | 2014-05-20 08:24:26 +1000 | [diff] [blame] | 1100 | else { |
| 1101 | /* |
| 1102 | * Check whether the page to write out straddles i_size or is
| 1103 | * fully beyond it.
| 1104 | * ------------------------------------------------------- |
| 1105 | * | file mapping | <EOF> | |
| 1106 | * ------------------------------------------------------- |
| 1107 | * | Page ... | Page N-2 | Page N-1 | Page N | Beyond | |
| 1108 | * ^--------------------------------^-----------|--------- |
| 1109 | * | | Straddles | |
| 1110 | * ---------------------------------^-----------|--------| |
| 1111 | */ |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1112 | unsigned offset_into_page = offset & (PAGE_SIZE - 1); |
Christoph Hellwig | 6b7a03f | 2012-07-03 12:20:00 -0400 | [diff] [blame] | 1113 | |
| 1114 | /* |
Jan Kara | ff9a28f | 2013-03-14 14:30:54 +0100 | [diff] [blame] | 1115 | * Skip the page if it is fully outside i_size, e.g. due to a |
| 1116 | * truncate operation that is in progress. We must redirty the |
| 1117 | * page so that reclaim stops reclaiming it. Otherwise |
| 1118 | * xfs_vm_releasepage() is called on it and gets confused. |
Jie Liu | 8695d27 | 2014-05-20 08:24:26 +1000 | [diff] [blame] | 1119 | * |
| 1120 | * Note that end_index is an unsigned long. If the given offset
| 1121 | * is greater than 16TB on a 32-bit system, checking whether the
| 1122 | * page is fully outside i_size via
| 1123 | * "if (page->index >= end_index + 1)" would overflow:
| 1124 | * "end_index + 1" evaluates to 0, so the page would be redirtied
| 1125 | * and written out repeatedly, resulting in an infinite loop that
| 1126 | * hangs the user program performing the operation. Instead, we
| 1127 | * verify this situation by checking whether the page to write is
| 1128 | * totally beyond i_size or whether its offset is just equal to
| 1129 | * the EOF.
Christoph Hellwig | 6b7a03f | 2012-07-03 12:20:00 -0400 | [diff] [blame] | 1130 | */ |
Jie Liu | 8695d27 | 2014-05-20 08:24:26 +1000 | [diff] [blame] | 1131 | if (page->index > end_index || |
| 1132 | (page->index == end_index && offset_into_page == 0)) |
Jan Kara | ff9a28f | 2013-03-14 14:30:54 +0100 | [diff] [blame] | 1133 | goto redirty; |
Christoph Hellwig | 6b7a03f | 2012-07-03 12:20:00 -0400 | [diff] [blame] | 1134 | |
| 1135 | /* |
| 1136 | * The page straddles i_size. It must be zeroed out on each |
| 1137 | * and every writepage invocation because it may be mmapped. |
| 1138 | * "A file is mapped in multiples of the page size. For a file |
Jie Liu | 8695d27 | 2014-05-20 08:24:26 +1000 | [diff] [blame] | 1139 | * that is not a multiple of the page size, the remaining |
Christoph Hellwig | 6b7a03f | 2012-07-03 12:20:00 -0400 | [diff] [blame] | 1140 | * memory is zeroed when mapped, and writes to that region are |
| 1141 | * not written out to the file." |
| 1142 | */ |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1143 | zero_user_segment(page, offset_into_page, PAGE_SIZE); |
Jie Liu | 8695d27 | 2014-05-20 08:24:26 +1000 | [diff] [blame] | 1144 | |
| 1145 | /* Adjust the end_offset to the end of file */ |
| 1146 | end_offset = offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1147 | } |
| 1148 | |
Darrick J. Wong | 2d5f4b5 | 2017-11-27 09:50:22 -0800 | [diff] [blame] | 1149 | return xfs_writepage_map(wpc, wbc, inode, page, end_offset); |
Nathan Scott | f51623b | 2006-03-14 13:26:27 +1100 | [diff] [blame] | 1150 | |
Christoph Hellwig | b5420f2 | 2010-08-24 11:47:51 +1000 | [diff] [blame] | 1151 | redirty: |
Nathan Scott | f51623b | 2006-03-14 13:26:27 +1100 | [diff] [blame] | 1152 | redirty_page_for_writepage(wbc, page); |
| 1153 | unlock_page(page); |
| 1154 | return 0; |
Nathan Scott | f51623b | 2006-03-14 13:26:27 +1100 | [diff] [blame] | 1155 | } |
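
/*
 * Worked example for the EOF handling in xfs_do_writepage() above
 * (illustrative numbers, assuming 4096-byte pages):
 *
 *   i_size = 10100  =>  end_index = 10100 >> PAGE_SHIFT = 2
 *
 *   page->index 0 or 1: fully inside the file,
 *                       end_offset = (index + 1) << PAGE_SHIFT
 *   page->index 2:      straddles EOF; offset_into_page =
 *                       10100 & (PAGE_SIZE - 1) = 1908, so bytes
 *                       1908..4095 are zeroed and end_offset = 10100
 *   page->index 3:      fully beyond EOF, redirtied and skipped
 */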
| 1156 | |
Nathan Scott | 7d4fb40 | 2006-06-09 15:27:16 +1000 | [diff] [blame] | 1157 | STATIC int |
Dave Chinner | fbcc025 | 2016-02-15 17:21:19 +1100 | [diff] [blame] | 1158 | xfs_vm_writepage( |
| 1159 | struct page *page, |
| 1160 | struct writeback_control *wbc) |
| 1161 | { |
| 1162 | struct xfs_writepage_ctx wpc = { |
| 1163 | .io_type = XFS_IO_INVALID, |
| 1164 | }; |
| 1165 | int ret; |
| 1166 | |
| 1167 | ret = xfs_do_writepage(page, wbc, &wpc); |
Dave Chinner | e10de37 | 2016-02-15 17:23:12 +1100 | [diff] [blame] | 1168 | if (wpc.ioend) |
| 1169 | ret = xfs_submit_ioend(wbc, wpc.ioend, ret); |
| 1170 | return ret; |
Dave Chinner | fbcc025 | 2016-02-15 17:21:19 +1100 | [diff] [blame] | 1171 | } |
| 1172 | |
| 1173 | STATIC int |
Nathan Scott | 7d4fb40 | 2006-06-09 15:27:16 +1000 | [diff] [blame] | 1174 | xfs_vm_writepages( |
| 1175 | struct address_space *mapping, |
| 1176 | struct writeback_control *wbc) |
| 1177 | { |
Dave Chinner | fbcc025 | 2016-02-15 17:21:19 +1100 | [diff] [blame] | 1178 | struct xfs_writepage_ctx wpc = { |
| 1179 | .io_type = XFS_IO_INVALID, |
| 1180 | }; |
| 1181 | int ret; |
| 1182 | |
Christoph Hellwig | b3aea4e | 2007-08-29 11:44:37 +1000 | [diff] [blame] | 1183 | xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED); |
Ross Zwisler | 7f6d5b5 | 2016-02-26 15:19:55 -0800 | [diff] [blame] | 1184 | if (dax_mapping(mapping)) |
| 1185 | return dax_writeback_mapping_range(mapping, |
| 1186 | xfs_find_bdev_for_inode(mapping->host), wbc); |
| 1187 | |
Dave Chinner | fbcc025 | 2016-02-15 17:21:19 +1100 | [diff] [blame] | 1188 | ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc); |
Dave Chinner | e10de37 | 2016-02-15 17:23:12 +1100 | [diff] [blame] | 1189 | if (wpc.ioend) |
| 1190 | ret = xfs_submit_ioend(wbc, wpc.ioend, ret); |
| 1191 | return ret; |
Nathan Scott | 7d4fb40 | 2006-06-09 15:27:16 +1000 | [diff] [blame] | 1192 | } |
| 1193 | |
Nathan Scott | f51623b | 2006-03-14 13:26:27 +1100 | [diff] [blame] | 1194 | /* |
| 1195 | * Called to move a page into cleanable state - and from there |
Christoph Hellwig | 89f3b363 | 2010-06-24 09:45:48 +1000 | [diff] [blame] | 1196 | * to be released. The page should already be clean. We always |
Nathan Scott | f51623b | 2006-03-14 13:26:27 +1100 | [diff] [blame] | 1197 | * have buffer heads in this call. |
| 1198 | * |
Christoph Hellwig | 89f3b363 | 2010-06-24 09:45:48 +1000 | [diff] [blame] | 1199 | * Returns 1 if the page is ok to release, 0 otherwise. |
Nathan Scott | f51623b | 2006-03-14 13:26:27 +1100 | [diff] [blame] | 1200 | */ |
| 1201 | STATIC int |
Nathan Scott | 238f4c5 | 2006-03-17 17:26:25 +1100 | [diff] [blame] | 1202 | xfs_vm_releasepage( |
Nathan Scott | f51623b | 2006-03-14 13:26:27 +1100 | [diff] [blame] | 1203 | struct page *page, |
| 1204 | gfp_t gfp_mask) |
| 1205 | { |
Christoph Hellwig | 20cb52e | 2010-06-24 09:46:01 +1000 | [diff] [blame] | 1206 | int delalloc, unwritten; |
Nathan Scott | f51623b | 2006-03-14 13:26:27 +1100 | [diff] [blame] | 1207 | |
Lukas Czerner | 34097df | 2013-05-21 23:58:01 -0400 | [diff] [blame] | 1208 | trace_xfs_releasepage(page->mapping->host, page, 0, 0); |
Nathan Scott | 238f4c5 | 2006-03-17 17:26:25 +1100 | [diff] [blame] | 1209 | |
Brian Foster | 99579cc | 2016-07-22 09:50:38 +1000 | [diff] [blame] | 1210 | /* |
| 1211 | * mm accommodates an old ext3 case where clean pages might not have had |
| 1212 | * the dirty bit cleared. Thus, it can send actual dirty pages to |
| 1213 | * ->releasepage() via shrink_active_list(). Conversely, |
Dave Chinner | 793d7db | 2017-10-13 09:47:45 -0700 | [diff] [blame] | 1214 | * block_invalidatepage() can send pages that are still marked dirty but |
| 1215 | * otherwise have invalidated buffers. |
Brian Foster | 99579cc | 2016-07-22 09:50:38 +1000 | [diff] [blame] | 1216 | * |
Jan Kara | 0a417b8 | 2017-01-11 10:20:04 -0800 | [diff] [blame] | 1217 | * We want to release the latter to avoid unnecessary buildup of the |
Dave Chinner | 793d7db | 2017-10-13 09:47:45 -0700 | [diff] [blame] | 1218 | * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages |
| 1219 | * that are entirely invalidated and need to be released. Hence the |
| 1220 | * only time we should get dirty pages here is through |
| 1221 | * shrink_active_list() and so we can simply skip those now. |
| 1222 | * |
| 1223 | * warn if we've left any lingering delalloc/unwritten buffers on clean |
| 1224 | * or invalidated pages we are about to release. |
Brian Foster | 99579cc | 2016-07-22 09:50:38 +1000 | [diff] [blame] | 1225 | */ |
Dave Chinner | 793d7db | 2017-10-13 09:47:45 -0700 | [diff] [blame] | 1226 | if (PageDirty(page)) |
| 1227 | return 0; |
| 1228 | |
Christoph Hellwig | 20cb52e | 2010-06-24 09:46:01 +1000 | [diff] [blame] | 1229 | xfs_count_page_state(page, &delalloc, &unwritten); |
Nathan Scott | f51623b | 2006-03-14 13:26:27 +1100 | [diff] [blame] | 1230 | |
Dave Chinner | 793d7db | 2017-10-13 09:47:45 -0700 | [diff] [blame] | 1231 | if (WARN_ON_ONCE(delalloc)) |
Christoph Hellwig | 89f3b363 | 2010-06-24 09:45:48 +1000 | [diff] [blame] | 1232 | return 0; |
Dave Chinner | 793d7db | 2017-10-13 09:47:45 -0700 | [diff] [blame] | 1233 | if (WARN_ON_ONCE(unwritten)) |
Nathan Scott | f51623b | 2006-03-14 13:26:27 +1100 | [diff] [blame] | 1234 | return 0; |
| 1235 | |
Nathan Scott | f51623b | 2006-03-14 13:26:27 +1100 | [diff] [blame] | 1236 | return try_to_free_buffers(page); |
| 1237 | } |
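
/*
 * Decision summary for xfs_vm_releasepage() above (illustrative
 * annotation, not part of the original source):
 *
 *   PageDirty(page)              -> 0 (dirty page sent in via
 *                                   shrink_active_list(), skip it)
 *   lingering delalloc buffers   -> WARN_ON_ONCE, return 0
 *   lingering unwritten buffers  -> WARN_ON_ONCE, return 0
 *   otherwise                    -> try_to_free_buffers(page)
 */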
| 1238 | |
Dave Chinner | a719370 | 2015-04-16 21:57:48 +1000 | [diff] [blame] | 1239 | /* |
Dave Chinner | 1fdca9c | 2015-04-16 21:58:21 +1000 | [diff] [blame] | 1240 | * Whether it is O_DIRECT or the mpage code calling, tell the caller how large
| 1241 | * the mapping is, so that we can avoid repeated get_blocks calls.
| 1242 | * |
| 1243 | * If the mapping spans EOF, then we have to break the mapping up as the mapping |
| 1244 | * for blocks beyond EOF must be marked new so that sub block regions can be |
| 1245 | * correctly zeroed. We can't do this for mappings within EOF unless the mapping |
| 1246 | * was just allocated or is unwritten, otherwise the callers would overwrite |
| 1247 | * existing data with zeros. Hence we have to split the mapping into a range up |
| 1248 | * to and including EOF, and a second mapping for beyond EOF. |
| 1249 | */ |
| 1250 | static void |
| 1251 | xfs_map_trim_size( |
| 1252 | struct inode *inode, |
| 1253 | sector_t iblock, |
| 1254 | struct buffer_head *bh_result, |
| 1255 | struct xfs_bmbt_irec *imap, |
| 1256 | xfs_off_t offset, |
| 1257 | ssize_t size) |
| 1258 | { |
| 1259 | xfs_off_t mapping_size; |
| 1260 | |
| 1261 | mapping_size = imap->br_startoff + imap->br_blockcount - iblock; |
| 1262 | mapping_size <<= inode->i_blkbits; |
| 1263 | |
| 1264 | ASSERT(mapping_size > 0); |
| 1265 | if (mapping_size > size) |
| 1266 | mapping_size = size; |
| 1267 | if (offset < i_size_read(inode) && |
Darrick J. Wong | 22a6c83 | 2017-11-27 09:50:17 -0800 | [diff] [blame] | 1268 | (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) { |
Dave Chinner | 1fdca9c | 2015-04-16 21:58:21 +1000 | [diff] [blame] | 1269 | /* limit mapping to block that spans EOF */ |
| 1270 | mapping_size = roundup_64(i_size_read(inode) - offset, |
Fabian Frederick | 9340747 | 2017-02-27 14:28:32 -0800 | [diff] [blame] | 1271 | i_blocksize(inode)); |
Dave Chinner | 1fdca9c | 2015-04-16 21:58:21 +1000 | [diff] [blame] | 1272 | } |
| 1273 | if (mapping_size > LONG_MAX) |
| 1274 | mapping_size = LONG_MAX; |
| 1275 | |
| 1276 | bh_result->b_size = mapping_size; |
| 1277 | } |
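
/*
 * Worked example for xfs_map_trim_size() above (illustrative numbers,
 * assuming 4096-byte blocks, i.e. i_blkbits = 12):
 *
 *   imap: br_startoff = 0, br_blockcount = 100, iblock = 10
 *     => mapping_size = (0 + 100 - 10) << 12 = 368640 bytes
 *   caller asked for size = 65536
 *     => mapping_size capped to 65536
 *   i_size = 50000, offset = 40960: offset + 65536 spans EOF, so
 *     mapping_size = roundup_64(50000 - 40960, 4096) = 12288,
 *     i.e. up to and including the block that contains EOF
 */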
| 1278 | |
Darrick J. Wong | 0613f16 | 2016-10-03 09:11:37 -0700 | [diff] [blame] | 1279 | static int |
Christoph Hellwig | acdda3a | 2016-11-30 14:37:15 +1100 | [diff] [blame] | 1280 | xfs_get_blocks( |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1281 | struct inode *inode, |
| 1282 | sector_t iblock, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1283 | struct buffer_head *bh_result, |
Christoph Hellwig | acdda3a | 2016-11-30 14:37:15 +1100 | [diff] [blame] | 1284 | int create) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1285 | { |
Christoph Hellwig | a206c81 | 2010-12-10 08:42:20 +0000 | [diff] [blame] | 1286 | struct xfs_inode *ip = XFS_I(inode); |
| 1287 | struct xfs_mount *mp = ip->i_mount; |
| 1288 | xfs_fileoff_t offset_fsb, end_fsb; |
| 1289 | int error = 0; |
| 1290 | int lockmode = 0; |
Christoph Hellwig | 207d041 | 2010-04-28 12:28:56 +0000 | [diff] [blame] | 1291 | struct xfs_bmbt_irec imap; |
Christoph Hellwig | a206c81 | 2010-12-10 08:42:20 +0000 | [diff] [blame] | 1292 | int nimaps = 1; |
Nathan Scott | fdc7ed7 | 2005-11-02 15:13:13 +1100 | [diff] [blame] | 1293 | xfs_off_t offset; |
| 1294 | ssize_t size; |
Christoph Hellwig | a206c81 | 2010-12-10 08:42:20 +0000 | [diff] [blame] | 1295 | |
Christoph Hellwig | acdda3a | 2016-11-30 14:37:15 +1100 | [diff] [blame] | 1296 | BUG_ON(create); |
Christoph Hellwig | 6e8a27a | 2016-06-21 09:53:45 +1000 | [diff] [blame] | 1297 | |
Christoph Hellwig | a206c81 | 2010-12-10 08:42:20 +0000 | [diff] [blame] | 1298 | if (XFS_FORCED_SHUTDOWN(mp)) |
Eric Sandeen | b474c7a | 2014-06-22 15:04:54 +1000 | [diff] [blame] | 1299 | return -EIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1300 | |
Nathan Scott | fdc7ed7 | 2005-11-02 15:13:13 +1100 | [diff] [blame] | 1301 | offset = (xfs_off_t)iblock << inode->i_blkbits; |
Fabian Frederick | 9340747 | 2017-02-27 14:28:32 -0800 | [diff] [blame] | 1302 | ASSERT(bh_result->b_size >= i_blocksize(inode)); |
Nathan Scott | c253666 | 2006-03-29 10:44:40 +1000 | [diff] [blame] | 1303 | size = bh_result->b_size; |
Lachlan McIlroy | 364f358 | 2008-09-17 16:50:14 +1000 | [diff] [blame] | 1304 | |
Christoph Hellwig | acdda3a | 2016-11-30 14:37:15 +1100 | [diff] [blame] | 1305 | if (offset >= i_size_read(inode)) |
Lachlan McIlroy | 364f358 | 2008-09-17 16:50:14 +1000 | [diff] [blame] | 1306 | return 0; |
| 1307 | |
Dave Chinner | 507630b | 2012-03-27 10:34:50 -0400 | [diff] [blame] | 1308 | /* |
| 1309 | * Direct I/O is usually done on preallocated files, so try getting |
Christoph Hellwig | 6e8a27a | 2016-06-21 09:53:45 +1000 | [diff] [blame] | 1310 | * a block mapping without an exclusive lock first. |
Dave Chinner | 507630b | 2012-03-27 10:34:50 -0400 | [diff] [blame] | 1311 | */ |
Christoph Hellwig | 6e8a27a | 2016-06-21 09:53:45 +1000 | [diff] [blame] | 1312 | lockmode = xfs_ilock_data_map_shared(ip); |
Christoph Hellwig | f2bde9b | 2010-06-24 11:44:35 +1000 | [diff] [blame] | 1313 | |
Dave Chinner | d2c2819 | 2012-06-08 15:44:53 +1000 | [diff] [blame] | 1314 | ASSERT(offset <= mp->m_super->s_maxbytes); |
Darrick J. Wong | 22a6c83 | 2017-11-27 09:50:17 -0800 | [diff] [blame] | 1315 | if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes) |
Dave Chinner | d2c2819 | 2012-06-08 15:44:53 +1000 | [diff] [blame] | 1316 | size = mp->m_super->s_maxbytes - offset; |
Christoph Hellwig | a206c81 | 2010-12-10 08:42:20 +0000 | [diff] [blame] | 1317 | end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size); |
| 1318 | offset_fsb = XFS_B_TO_FSBT(mp, offset); |
| 1319 | |
Christoph Hellwig | acdda3a | 2016-11-30 14:37:15 +1100 | [diff] [blame] | 1320 | error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, |
| 1321 | &imap, &nimaps, XFS_BMAPI_ENTIRE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1322 | if (error) |
Christoph Hellwig | a206c81 | 2010-12-10 08:42:20 +0000 | [diff] [blame] | 1323 | goto out_unlock; |
| 1324 | |
Christoph Hellwig | acdda3a | 2016-11-30 14:37:15 +1100 | [diff] [blame] | 1325 | if (nimaps) { |
Dave Chinner | d5cc2e3 | 2015-04-16 21:59:07 +1000 | [diff] [blame] | 1326 | trace_xfs_get_blocks_found(ip, offset, size, |
Christoph Hellwig | 63fbb4c | 2017-03-28 14:53:36 -0700 | [diff] [blame] | 1327 | imap.br_state == XFS_EXT_UNWRITTEN ? |
| 1328 | XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, &imap); |
Dave Chinner | 507630b | 2012-03-27 10:34:50 -0400 | [diff] [blame] | 1329 | xfs_iunlock(ip, lockmode); |
Christoph Hellwig | a206c81 | 2010-12-10 08:42:20 +0000 | [diff] [blame] | 1330 | } else { |
| 1331 | trace_xfs_get_blocks_notfound(ip, offset, size); |
| 1332 | goto out_unlock; |
| 1333 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1334 | |
Dave Chinner | 1fdca9c | 2015-04-16 21:58:21 +1000 | [diff] [blame] | 1335 | /* trim mapping down to size requested */ |
Christoph Hellwig | 6e8a27a | 2016-06-21 09:53:45 +1000 | [diff] [blame] | 1336 | xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size); |
Dave Chinner | 1fdca9c | 2015-04-16 21:58:21 +1000 | [diff] [blame] | 1337 | |
Dave Chinner | a719370 | 2015-04-16 21:57:48 +1000 | [diff] [blame] | 1338 | /* |
| 1339 | * For unwritten extents do not report a disk address in the buffered |
| 1340 | * read case (treat as if we're reading into a hole). |
| 1341 | */ |
Christoph Hellwig | 9c4f29d | 2017-03-28 14:53:35 -0700 | [diff] [blame] | 1342 | if (xfs_bmap_is_real_extent(&imap)) |
Dave Chinner | a719370 | 2015-04-16 21:57:48 +1000 | [diff] [blame] | 1343 | xfs_map_buffer(inode, bh_result, &imap, offset); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1344 | |
Nathan Scott | c253666 | 2006-03-29 10:44:40 +1000 | [diff] [blame] | 1345 | /* |
| 1346 | * If this is a realtime file, data may be on a different device. |
| 1347 | * to that pointed to from the buffer_head b_bdev currently. |
| 1348 | */ |
Christoph Hellwig | 046f168 | 2010-04-28 12:28:52 +0000 | [diff] [blame] | 1349 | bh_result->b_bdev = xfs_find_bdev_for_inode(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1350 | return 0; |
Christoph Hellwig | a206c81 | 2010-12-10 08:42:20 +0000 | [diff] [blame] | 1351 | |
| 1352 | out_unlock: |
| 1353 | xfs_iunlock(ip, lockmode); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1354 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1355 | } |
| 1356 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1357 | STATIC ssize_t |
Nathan Scott | e4c573b | 2006-03-14 13:54:26 +1100 | [diff] [blame] | 1358 | xfs_vm_direct_IO( |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1359 | struct kiocb *iocb, |
Christoph Hellwig | c8b8e32 | 2016-04-07 08:51:58 -0700 | [diff] [blame] | 1360 | struct iov_iter *iter) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1361 | { |
Jie Liu | 58e5985 | 2013-07-16 13:11:16 +0800 | [diff] [blame] | 1362 | /* |
Christoph Hellwig | fa8d972 | 2016-07-20 11:38:01 +1000 | [diff] [blame] | 1363 | * We just need the method present so that open/fcntl allow direct I/O. |
Jie Liu | 58e5985 | 2013-07-16 13:11:16 +0800 | [diff] [blame] | 1364 | */ |
Christoph Hellwig | fa8d972 | 2016-07-20 11:38:01 +1000 | [diff] [blame] | 1365 | return -EINVAL; |
Nathan Scott | f51623b | 2006-03-14 13:26:27 +1100 | [diff] [blame] | 1366 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1367 | |
| 1368 | STATIC sector_t |
Nathan Scott | e4c573b | 2006-03-14 13:54:26 +1100 | [diff] [blame] | 1369 | xfs_vm_bmap( |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1370 | struct address_space *mapping, |
| 1371 | sector_t block) |
| 1372 | { |
| 1373 | struct inode *inode = (struct inode *)mapping->host; |
Christoph Hellwig | 739bfb2 | 2007-08-29 10:58:01 +1000 | [diff] [blame] | 1374 | struct xfs_inode *ip = XFS_I(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1375 | |
Christoph Hellwig | cca28fb | 2010-06-24 11:57:09 +1000 | [diff] [blame] | 1376 | trace_xfs_vm_bmap(XFS_I(inode)); |
Darrick J. Wong | db1327b | 2016-10-03 09:11:36 -0700 | [diff] [blame] | 1377 | |
| 1378 | /* |
| 1379 | * The swap code (ab-)uses ->bmap to get a block mapping and then |
| 1380 | * bypasses the file system for actual I/O. We really can't allow
| 1381 | * that on reflink inodes, so we have to skip out here. And yes,
Darrick J. Wong | eb5e248 | 2017-06-21 20:27:35 -0700 | [diff] [blame] | 1382 | * 0 is the magic code for a bmap error. |
| 1383 | * |
| 1384 | * Since we don't pass back blockdev info, we can't return bmap |
| 1385 | * information for rt files either. |
Darrick J. Wong | db1327b | 2016-10-03 09:11:36 -0700 | [diff] [blame] | 1386 | */ |
Darrick J. Wong | eb5e248 | 2017-06-21 20:27:35 -0700 | [diff] [blame] | 1387 | if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip)) |
Darrick J. Wong | db1327b | 2016-10-03 09:11:36 -0700 | [diff] [blame] | 1388 | return 0; |
Christoph Hellwig | 6552321 | 2016-11-30 14:33:25 +1100 | [diff] [blame] | 1389 | |
Dave Chinner | 4bc1ea6 | 2012-11-12 22:53:56 +1100 | [diff] [blame] | 1390 | filemap_write_and_wait(mapping); |
Nathan Scott | c253666 | 2006-03-29 10:44:40 +1000 | [diff] [blame] | 1391 | return generic_block_bmap(mapping, block, xfs_get_blocks); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1392 | } |
| 1393 | |
| 1394 | STATIC int |
Nathan Scott | e4c573b | 2006-03-14 13:54:26 +1100 | [diff] [blame] | 1395 | xfs_vm_readpage( |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1396 | struct file *unused, |
| 1397 | struct page *page) |
| 1398 | { |
Dave Chinner | 121e213 | 2016-01-08 11:28:35 +1100 | [diff] [blame] | 1399 | trace_xfs_vm_readpage(page->mapping->host, 1); |
Nathan Scott | c253666 | 2006-03-29 10:44:40 +1000 | [diff] [blame] | 1400 | return mpage_readpage(page, xfs_get_blocks); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1401 | } |
| 1402 | |
| 1403 | STATIC int |
Nathan Scott | e4c573b | 2006-03-14 13:54:26 +1100 | [diff] [blame] | 1404 | xfs_vm_readpages( |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1405 | struct file *unused, |
| 1406 | struct address_space *mapping, |
| 1407 | struct list_head *pages, |
| 1408 | unsigned nr_pages) |
| 1409 | { |
Dave Chinner | 121e213 | 2016-01-08 11:28:35 +1100 | [diff] [blame] | 1410 | trace_xfs_vm_readpages(mapping->host, nr_pages); |
Nathan Scott | c253666 | 2006-03-29 10:44:40 +1000 | [diff] [blame] | 1411 | return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1412 | } |
| 1413 | |
Dave Chinner | 22e757a | 2014-09-02 12:12:51 +1000 | [diff] [blame] | 1414 | /* |
| 1415 | * This is basically a copy of __set_page_dirty_buffers() with one |
| 1416 | * small tweak: buffers beyond EOF do not get marked dirty. If we mark them |
| 1417 | * dirty, we'll never be able to clean them because we don't write buffers |
| 1418 | * beyond EOF, and that means we can't invalidate pages that span EOF |
| 1419 | * that have been marked dirty. Further, the dirty state can leak into |
| 1420 | * the file interior if the file is extended, resulting in all sorts of |
| 1421 | * bad things happening as the state does not match the underlying data. |
| 1422 | * |
| 1423 | * XXX: this really indicates that bufferheads in XFS need to die. Warts like |
| 1424 | * this only exist because of bufferheads and how the generic code manages them. |
| 1425 | */ |
| 1426 | STATIC int |
| 1427 | xfs_vm_set_page_dirty( |
| 1428 | struct page *page) |
| 1429 | { |
| 1430 | struct address_space *mapping = page->mapping; |
| 1431 | struct inode *inode = mapping->host; |
| 1432 | loff_t end_offset; |
| 1433 | loff_t offset; |
| 1434 | int newly_dirty; |
| 1435 | |
| 1436 | if (unlikely(!mapping)) |
| 1437 | return !TestSetPageDirty(page); |
| 1438 | |
| 1439 | end_offset = i_size_read(inode); |
| 1440 | offset = page_offset(page); |
| 1441 | |
| 1442 | spin_lock(&mapping->private_lock); |
| 1443 | if (page_has_buffers(page)) { |
| 1444 | struct buffer_head *head = page_buffers(page); |
| 1445 | struct buffer_head *bh = head; |
| 1446 | |
| 1447 | do { |
| 1448 | if (offset < end_offset) |
| 1449 | set_buffer_dirty(bh); |
| 1450 | bh = bh->b_this_page; |
Fabian Frederick | 9340747 | 2017-02-27 14:28:32 -0800 | [diff] [blame] | 1451 | offset += i_blocksize(inode); |
Dave Chinner | 22e757a | 2014-09-02 12:12:51 +1000 | [diff] [blame] | 1452 | } while (bh != head); |
| 1453 | } |
Greg Thelen | c4843a7 | 2015-05-22 17:13:16 -0400 | [diff] [blame] | 1454 | /* |
Johannes Weiner | 81f8c3a | 2016-03-15 14:57:04 -0700 | [diff] [blame] | 1455 | * Lock out page->mem_cgroup migration to keep PageDirty |
| 1456 | * synchronized with per-memcg dirty page counters. |
Greg Thelen | c4843a7 | 2015-05-22 17:13:16 -0400 | [diff] [blame] | 1457 | */ |
Johannes Weiner | 62cccb8 | 2016-03-15 14:57:22 -0700 | [diff] [blame] | 1458 | lock_page_memcg(page); |
Dave Chinner | 22e757a | 2014-09-02 12:12:51 +1000 | [diff] [blame] | 1459 | newly_dirty = !TestSetPageDirty(page); |
| 1460 | spin_unlock(&mapping->private_lock); |
| 1461 | |
| 1462 | if (newly_dirty) { |
| 1463 | /* sigh - __set_page_dirty() is static, so copy it here, too */ |
| 1464 | unsigned long flags; |
| 1465 | |
| 1466 | spin_lock_irqsave(&mapping->tree_lock, flags); |
| 1467 | if (page->mapping) { /* Race with truncate? */ |
| 1468 | WARN_ON_ONCE(!PageUptodate(page)); |
Johannes Weiner | 62cccb8 | 2016-03-15 14:57:22 -0700 | [diff] [blame] | 1469 | account_page_dirtied(page, mapping); |
Dave Chinner | 22e757a | 2014-09-02 12:12:51 +1000 | [diff] [blame] | 1470 | radix_tree_tag_set(&mapping->page_tree, |
| 1471 | page_index(page), PAGECACHE_TAG_DIRTY); |
| 1472 | } |
| 1473 | spin_unlock_irqrestore(&mapping->tree_lock, flags); |
Dave Chinner | 22e757a | 2014-09-02 12:12:51 +1000 | [diff] [blame] | 1474 | } |
Johannes Weiner | 62cccb8 | 2016-03-15 14:57:22 -0700 | [diff] [blame] | 1475 | unlock_page_memcg(page); |
Greg Thelen | c4843a7 | 2015-05-22 17:13:16 -0400 | [diff] [blame] | 1476 | if (newly_dirty) |
| 1477 | __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); |
Dave Chinner | 22e757a | 2014-09-02 12:12:51 +1000 | [diff] [blame] | 1478 | return newly_dirty; |
| 1479 | } |
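
/*
 * Worked example for xfs_vm_set_page_dirty() above (illustrative
 * numbers, assuming 4096-byte pages and 1024-byte blocks):
 *
 *   i_size = 5632, page->index = 1  => page covers offsets 4096..8191
 *   buffers at 4096 and 5120 start below i_size     -> marked dirty
 *   buffers at 6144 and 7168 are entirely beyond EOF -> left clean,
 *     so dirty state can never leak past EOF
 */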
| 1480 | |
Christoph Hellwig | f5e54d6 | 2006-06-28 04:26:44 -0700 | [diff] [blame] | 1481 | const struct address_space_operations xfs_address_space_operations = { |
Nathan Scott | e4c573b | 2006-03-14 13:54:26 +1100 | [diff] [blame] | 1482 | .readpage = xfs_vm_readpage, |
| 1483 | .readpages = xfs_vm_readpages, |
| 1484 | .writepage = xfs_vm_writepage, |
Nathan Scott | 7d4fb40 | 2006-06-09 15:27:16 +1000 | [diff] [blame] | 1485 | .writepages = xfs_vm_writepages, |
Dave Chinner | 22e757a | 2014-09-02 12:12:51 +1000 | [diff] [blame] | 1486 | .set_page_dirty = xfs_vm_set_page_dirty, |
Nathan Scott | 238f4c5 | 2006-03-17 17:26:25 +1100 | [diff] [blame] | 1487 | .releasepage = xfs_vm_releasepage, |
| 1488 | .invalidatepage = xfs_vm_invalidatepage, |
Nathan Scott | e4c573b | 2006-03-14 13:54:26 +1100 | [diff] [blame] | 1489 | .bmap = xfs_vm_bmap, |
| 1490 | .direct_IO = xfs_vm_direct_IO, |
Christoph Lameter | e965f96 | 2006-02-01 03:05:41 -0800 | [diff] [blame] | 1491 | .migratepage = buffer_migrate_page, |
Hisashi Hifumi | bddaafa | 2009-03-29 09:53:38 +0200 | [diff] [blame] | 1492 | .is_partially_uptodate = block_is_partially_uptodate, |
Andi Kleen | aa261f5 | 2009-09-16 11:50:16 +0200 | [diff] [blame] | 1493 | .error_remove_page = generic_error_remove_page, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1494 | }; |