// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;
static struct kmem_cache *io_end_vec_cachep;

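/*
 * Set up the slab caches used by this file: one for ext4_io_end structures
 * and one for the ext4_io_end_vec entries hanging off them. Called once at
 * module initialization.
 */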
int __init ext4_init_pageio(void)
{
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL)
		return -ENOMEM;

	io_end_vec_cachep = KMEM_CACHE(ext4_io_end_vec, 0);
	if (io_end_vec_cachep == NULL) {
		kmem_cache_destroy(io_end_cachep);
		return -ENOMEM;
	}
	return 0;
}

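/* Tear down the slab caches created by ext4_init_pageio(). */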
void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
	kmem_cache_destroy(io_end_vec_cachep);
}

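/*
 * Allocate a new io_end_vec, link it at the tail of io_end->list_vec and
 * return it. Allocation is GFP_NOFS because this runs on the writeback path.
 */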
struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec;

	io_end_vec = kmem_cache_zalloc(io_end_vec_cachep, GFP_NOFS);
	if (!io_end_vec)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&io_end_vec->list);
	list_add_tail(&io_end_vec->list, &io_end->list_vec);
	return io_end_vec;
}

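/* Free every io_end_vec still linked on the io_end's list. */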
static void ext4_free_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec, *tmp;

	if (list_empty(&io_end->list_vec))
		return;
	list_for_each_entry_safe(io_end_vec, tmp, &io_end->list_vec, list) {
		list_del(&io_end_vec->list);
		kmem_cache_free(io_end_vec_cachep, io_end_vec);
	}
}

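/* Return the most recently allocated io_end_vec; the list must be non-empty. */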
struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end)
{
	BUG_ON(list_empty(&io_end->list_vec));
	return list_last_entry(&io_end->list_vec, struct ext4_io_end_vec, list);
}

/*
 * Print a buffer I/O error message compatible with the one in fs/buffer.c.
 * This provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message. We really need a unified error reporting
 * structure to userspace a la Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
			   bh->b_bdev,
			   (unsigned long long)bh->b_blocknr);
}

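/*
 * Finish page writeback for a completed bio: clear the async_write flag on
 * every buffer the bio covers and, once no buffer on a page remains under
 * IO, end writeback on that page. For encrypted files the bio carries
 * bounce pages, so map those back to their pagecache pages first.
 */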
static void ext4_finish_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		struct page *bounce_page = NULL;
		struct buffer_head *bh, *head;
		unsigned bio_start = bvec->bv_offset;
		unsigned bio_end = bio_start + bvec->bv_len;
		unsigned under_io = 0;
		unsigned long flags;

		if (fscrypt_is_bounce_page(page)) {
			bounce_page = page;
			page = fscrypt_pagecache_page(bounce_page);
		}

		if (bio->bi_status) {
			SetPageError(page);
			mapping_set_error(page->mapping, -EIO);
		}
		bh = head = page_buffers(page);
		/*
		 * We check all buffers in the page under b_uptodate_lock
		 * to avoid races with other end io clearing async_write flags
		 */
		spin_lock_irqsave(&head->b_uptodate_lock, flags);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (bio->bi_status)
				buffer_io_error(bh);
		} while ((bh = bh->b_this_page) != head);
		spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
		if (!under_io) {
			fscrypt_free_bounce_page(bounce_page);
			end_page_writeback(page);
		}
	}
}

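/*
 * Release the final resources held by an io_end: finish and free the bios
 * chained on io_end->bio, free any io_end_vec entries, and return the
 * io_end itself to its slab cache.
 */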
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	struct bio *bio, *next_bio;

	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
	WARN_ON(io_end->handle);

	for (bio = io_end->bio; bio; bio = next_bio) {
		next_bio = bio->bi_private;
		ext4_finish_bio(bio);
		bio_put(bio);
	}
	ext4_free_io_end_vec(io_end);
	kmem_cache_free(io_end_cachep, io_end);
}

/*
 * Check a range of space and convert unwritten extents to written. Note that
 * we are protected from truncate touching the same part of the extent tree by
 * the fact that truncate code waits for all DIO to finish (thus exclusion from
 * direct IO is achieved) and also waits for PageWriteback bits. Thus we
 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
 * completed (the io_end is then released from ext4_release_io_end()).
 */
static int ext4_end_io_end(ext4_io_end_t *io_end)
{
	struct inode *inode = io_end->inode;
	handle_t *handle = io_end->handle;
	int ret = 0;

	ext4_debug("ext4_end_io_end: io_end 0x%p from inode %lu, list->next 0x%p, "
		   "list->prev 0x%p\n",
		   io_end, inode->i_ino, io_end->list.next, io_end->list.prev);

	io_end->handle = NULL;	/* Following call will use up the handle */
	ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
	if (ret < 0 && !ext4_forced_shutdown(EXT4_SB(inode->i_sb))) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss! "
			 "(inode %lu, error %d)", inode->i_ino, ret);
	}
	ext4_clear_io_unwritten_flag(io_end);
	ext4_release_io_end(io_end);
	return ret;
}

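/* Dump an inode's list of completed io_ends (compiled in with EXT4FS_DEBUG). */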
static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef	EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io_end, *io_end0, *io_end1;

	if (list_empty(head))
		return;

	ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
	list_for_each_entry(io_end, head, list) {
		cur = &io_end->list;
		before = cur->prev;
		io_end0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io_end1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			   io_end, inode->i_ino, io_end0, io_end1);
	}
#endif
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
	struct workqueue_struct *wq;
	unsigned long flags;

	/* Only reserved conversions from writeback should enter here */
	WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
	WARN_ON(!io_end->handle && sbi->s_journal);
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	wq = sbi->rsv_conversion_wq;
	if (list_empty(&ei->i_rsv_conversion_list))
		queue_work(wq, &ei->i_rsv_conversion_work);
	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

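/*
 * Detach the list of completed io_ends from the inode under
 * i_completed_io_lock, then convert the unwritten extents of each entry.
 * Returns the first error encountered, if any.
 */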
static int ext4_do_flush_completed_IO(struct inode *inode,
				      struct list_head *head)
{
	ext4_io_end_t *io_end;
	struct list_head unwritten;
	unsigned long flags;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, ret = 0;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	dump_completed_IO(inode, head);
	list_replace_init(head, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&unwritten)) {
		io_end = list_entry(unwritten.next, ext4_io_end_t, list);
		BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
		list_del_init(&io_end->list);

		err = ext4_end_io_end(io_end);
		if (unlikely(!ret && err))
			ret = err;
	}
	return ret;
}

/*
 * Work on completed IO, to convert unwritten extents to written extents.
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
						  i_rsv_conversion_work);
	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

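/* Allocate and initialize an io_end structure holding a single reference. */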
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io_end = kmem_cache_zalloc(io_end_cachep, flags);

	if (io_end) {
		io_end->inode = inode;
		INIT_LIST_HEAD(&io_end->list);
		INIT_LIST_HEAD(&io_end->list_vec);
		refcount_set(&io_end->count, 1);
	}
	return io_end;
}

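/*
 * Drop a reference to an io_end. If this was the last reference and the
 * io_end still needs unwritten extent conversion, defer the conversion to
 * the reserved workqueue; otherwise release the io_end immediately.
 */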
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
	if (refcount_dec_and_test(&io_end->count)) {
		if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
		    list_empty(&io_end->list_vec)) {
			ext4_release_io_end(io_end);
			return;
		}
		ext4_add_complete_io(io_end);
	}
}

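/*
 * Drop a reference to an io_end, performing any pending unwritten extent
 * conversion synchronously in the caller's context.
 */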
int ext4_put_io_end(ext4_io_end_t *io_end)
{
	int err = 0;

	if (refcount_dec_and_test(&io_end->count)) {
		if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
			err = ext4_convert_unwritten_io_end_vec(io_end->handle,
								io_end);
			io_end->handle = NULL;
			ext4_clear_io_unwritten_flag(io_end);
		}
		ext4_release_io_end(io_end);
	}
	return err;
}

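/* Take an extra reference to an io_end. */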
ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
	refcount_inc(&io_end->count);
	return io_end;
}

/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio)
{
	ext4_io_end_t *io_end = bio->bi_private;
	sector_t bi_sector = bio->bi_iter.bi_sector;
	char b[BDEVNAME_SIZE];

	if (WARN_ONCE(!io_end, "io_end is NULL: %s: sector %Lu len %u err %d\n",
		      bio_devname(bio, b),
		      (long long) bio->bi_iter.bi_sector,
		      (unsigned) bio_sectors(bio),
		      bio->bi_status)) {
		ext4_finish_bio(bio);
		bio_put(bio);
		return;
	}
	bio->bi_end_io = NULL;

	if (bio->bi_status) {
		struct inode *inode = io_end->inode;

		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
			     "(starting block %llu)",
			     bio->bi_status, inode->i_ino,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
		mapping_set_error(inode->i_mapping,
				  blk_status_to_errno(bio->bi_status));
	}

	if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
		/*
		 * Link bio into list hanging from io_end. We have to do it
		 * atomically as bio completions can be racing against each
		 * other.
		 */
		bio->bi_private = xchg(&io_end->bio, bio);
		ext4_put_io_end_defer(io_end);
	} else {
		/*
		 * Drop io_end reference early. Inode can get freed once
		 * we finish the bio.
		 */
		ext4_put_io_end_defer(io_end);
		ext4_finish_bio(bio);
		bio_put(bio);
	}
}

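/*
 * Submit the bio under construction in *io, if any, marking it REQ_SYNC
 * when the writeback is WB_SYNC_ALL.
 */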
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
				  REQ_SYNC : 0;
		io->io_bio->bi_write_hint = io->io_end->inode->i_write_hint;
		bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
		submit_bio(io->io_bio);
	}
	io->io_bio = NULL;
}

void ext4_io_submit_init(struct ext4_io_submit *io,
			 struct writeback_control *wbc)
{
	io->io_wbc = wbc;
	io->io_bio = NULL;
	io->io_end = NULL;
}

static void io_submit_init_bio(struct ext4_io_submit *io,
			       struct buffer_head *bh)
{
	struct bio *bio;

	/*
	 * bio_alloc will _always_ be able to allocate a bio if
	 * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
	 */
	bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);
	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio_set_dev(bio, bh->b_bdev);
	bio->bi_end_io = ext4_end_bio;
	bio->bi_private = ext4_get_io_end(io->io_end);
	io->io_bio = bio;
	io->io_next_block = bh->b_blocknr;
	wbc_init_bio(io->io_wbc, bio);
}

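/*
 * Add a buffer to the bio under construction, starting a new bio whenever
 * the buffer is discontiguous with, or cannot share an encryption context
 * with, the current one.
 */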
static void io_submit_add_bh(struct ext4_io_submit *io,
			     struct inode *inode,
			     struct page *page,
			     struct buffer_head *bh)
{
	int ret;

	if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
			   !fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		io_submit_init_bio(io, bh);
		io->io_bio->bi_write_hint = inode->i_write_hint;
	}
	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
	if (ret != bh->b_size)
		goto submit_and_retry;
	wbc_account_cgroup_owner(io->io_wbc, page, bh->b_size);
	io->io_next_block++;
}

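/*
 * Write out the dirty buffers of a locked page. Buffers are marked
 * async_write before any bio is submitted so that a racing end_io handler
 * cannot end page writeback early; data is first encrypted into a bounce
 * page when the inode uses fs-layer encryption.
 */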
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			bool keep_towrite)
{
	struct page *bounce_page = NULL;
	struct inode *inode = page->mapping->host;
	unsigned block_start;
	struct buffer_head *bh, *head;
	int ret = 0;
	int nr_submitted = 0;
	int nr_to_submit = 0;
	struct writeback_control *wbc = io->io_wbc;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	if (keep_towrite)
		set_page_writeback_keepwrite(page);
	else
		set_page_writeback(page);
	ClearPageError(page);

	/*
	 * Comments copied from block_write_full_page:
	 *
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	/*
	 * In the first loop we prepare and mark buffers to submit. We have to
	 * mark all buffers in the page before submitting so that
	 * end_page_writeback() cannot be called from ext4_end_bio() when IO
	 * on the first buffer finishes and we are still working on submitting
	 * the second buffer.
	 */
	bh = head = page_buffers(page);
	do {
		block_start = bh_offset(bh);
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_dirty(bh) || buffer_delay(bh) ||
		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
			/* A hole? We can safely clear the dirty bit */
			if (!buffer_mapped(bh))
				clear_buffer_dirty(bh);
			if (io->io_bio)
				ext4_io_submit(io);
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		set_buffer_async_write(bh);
		nr_to_submit++;
	} while ((bh = bh->b_this_page) != head);

	bh = head = page_buffers(page);

	/*
	 * If any blocks are being written to an encrypted file, encrypt them
	 * into a bounce page. For simplicity, just encrypt until the last
	 * block which might be needed. This may cause some unneeded blocks
	 * (e.g. holes) to be unnecessarily encrypted, but this is rare and
	 * can't happen in the common case of blocksize == PAGE_SIZE.
	 */
	if (fscrypt_inode_uses_fs_layer_crypto(inode) && nr_to_submit) {
		gfp_t gfp_flags = GFP_NOFS;
		unsigned int enc_bytes = round_up(len, i_blocksize(inode));

		/*
		 * Since bounce page allocation uses a mempool, we can only use
		 * a waiting mask (i.e. request guaranteed allocation) on the
		 * first page of the bio. Otherwise it can deadlock.
		 */
		if (io->io_bio)
			gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
	retry_encrypt:
		bounce_page = fscrypt_encrypt_pagecache_blocks(page, enc_bytes,
							       0, gfp_flags);
		if (IS_ERR(bounce_page)) {
			ret = PTR_ERR(bounce_page);
			if (ret == -ENOMEM &&
			    (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
				gfp_t new_gfp_flags = GFP_NOFS;

				if (io->io_bio)
					ext4_io_submit(io);
				else
					new_gfp_flags |= __GFP_NOFAIL;
				memalloc_retry_wait(gfp_flags);
				gfp_flags = new_gfp_flags;
				goto retry_encrypt;
			}

			printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
			redirty_page_for_writepage(wbc, page);
			do {
				clear_buffer_async_write(bh);
				bh = bh->b_this_page;
			} while (bh != head);
			goto unlock;
		}
	}

	/* Now submit buffers to write */
	do {
		if (!buffer_async_write(bh))
			continue;
		io_submit_add_bh(io, inode,
				 bounce_page ? bounce_page : page, bh);
		nr_submitted++;
		clear_buffer_dirty(bh);
	} while ((bh = bh->b_this_page) != head);

unlock:
	unlock_page(page);
	/* Nothing submitted - we have to end page writeback */
	if (!nr_submitted)
		end_page_writeback(page);
	return ret;
}