// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;
static struct kmem_cache *io_end_vec_cachep;

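/*
 * Create the slab caches for the io_end and io_end_vec structures.
 * Called once when the ext4 module is initialized.
 */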
int __init ext4_init_pageio(void)
{
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL)
		return -ENOMEM;

	io_end_vec_cachep = KMEM_CACHE(ext4_io_end_vec, 0);
	if (io_end_vec_cachep == NULL) {
		kmem_cache_destroy(io_end_cachep);
		return -ENOMEM;
	}
	return 0;
}

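/* Tear down the slab caches; the counterpart of ext4_init_pageio(). */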
void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
	kmem_cache_destroy(io_end_vec_cachep);
}

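/*
 * Allocate a new io_end_vec and queue it at the tail of the io_end's
 * vector list. Returns ERR_PTR(-ENOMEM) on allocation failure.
 */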
struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec;

	io_end_vec = kmem_cache_zalloc(io_end_vec_cachep, GFP_NOFS);
	if (!io_end_vec)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&io_end_vec->list);
	list_add_tail(&io_end_vec->list, &io_end->list_vec);
	return io_end_vec;
}

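/* Free all io_end_vec structures hanging off an io_end. */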
static void ext4_free_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec, *tmp;

	if (list_empty(&io_end->list_vec))
		return;
	list_for_each_entry_safe(io_end_vec, tmp, &io_end->list_vec, list) {
		list_del(&io_end_vec->list);
		kmem_cache_free(io_end_vec_cachep, io_end_vec);
	}
}

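/* Return the most recently added io_end_vec; the list must be non-empty. */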
struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end)
{
	BUG_ON(list_empty(&io_end->list_vec));
	return list_last_entry(&io_end->list_vec, struct ext4_io_end_vec, list);
}

/*
 * Print a buffer I/O error compatible with fs/buffer.c. This provides
 * compatibility with dmesg scrapers that look for a specific buffer
 * I/O error message. We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
			   bh->b_bdev,
			   (unsigned long long)bh->b_blocknr);
}

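/*
 * Finish page writeback for every page covered by a completed bio:
 * clear the async_write flag on the buffers the bio wrote, report any
 * I/O error, and end writeback on each page once none of its buffers
 * is still under I/O. Data written through an fscrypt bounce page is
 * mapped back to the pagecache page and the bounce page is freed here.
 */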
static void ext4_finish_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		struct page *bounce_page = NULL;
		struct buffer_head *bh, *head;
		unsigned bio_start = bvec->bv_offset;
		unsigned bio_end = bio_start + bvec->bv_len;
		unsigned under_io = 0;
		unsigned long flags;

		if (!page)
			continue;

		if (fscrypt_is_bounce_page(page)) {
			bounce_page = page;
			page = fscrypt_pagecache_page(bounce_page);
		}

		if (bio->bi_status) {
			SetPageError(page);
			mapping_set_error(page->mapping, -EIO);
		}
		bh = head = page_buffers(page);
		/*
		 * We check all buffers in the page under b_uptodate_lock
		 * to avoid races with other end io clearing async_write flags
		 */
		spin_lock_irqsave(&head->b_uptodate_lock, flags);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (bio->bi_status)
				buffer_io_error(bh);
		} while ((bh = bh->b_this_page) != head);
		spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
		if (!under_io) {
			fscrypt_free_bounce_page(bounce_page);
			end_page_writeback(page);
		}
	}
}

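/*
 * Free an io_end whose reference count has dropped to zero, finishing
 * any bios still linked to it. Unwritten extent conversion must already
 * be done (or never have been needed) by the time we get here.
 */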
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	struct bio *bio, *next_bio;

	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
	WARN_ON(io_end->handle);

	for (bio = io_end->bio; bio; bio = next_bio) {
		next_bio = bio->bi_private;
		ext4_finish_bio(bio);
		bio_put(bio);
	}
	ext4_free_io_end_vec(io_end);
	kmem_cache_free(io_end_cachep, io_end);
}

/*
 * Check a range of space and convert unwritten extents to written. Note that
 * we are protected from truncate touching the same part of the extent tree by
 * the fact that truncate code waits for all DIO to finish (thus exclusion from
 * direct IO is achieved) and also waits for PageWriteback bits. Thus we
 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
 * completed (happens from ext4_release_io_end()).
 */
static int ext4_end_io_end(ext4_io_end_t *io_end)
{
	struct inode *inode = io_end->inode;
	handle_t *handle = io_end->handle;
	int ret = 0;

	ext4_debug("ext4_end_io_end: io_end 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io_end, inode->i_ino, io_end->list.next, io_end->list.prev);

	io_end->handle = NULL;	/* Following call will use up the handle */
	ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
	if (ret < 0 && !ext4_forced_shutdown(EXT4_SB(inode->i_sb))) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss! "
			 "(inode %lu, error %d)", inode->i_ino, ret);
	}
	ext4_clear_io_unwritten_flag(io_end);
	ext4_release_io_end(io_end);
	return ret;
}

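/* Dump the inode's completed io_end list; compiled in only with EXT4FS_DEBUG. */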
static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io_end, *io_end0, *io_end1;

	if (list_empty(head))
		return;

	ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
	list_for_each_entry(io_end, head, list) {
		cur = &io_end->list;
		before = cur->prev;
		io_end0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io_end1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			   io_end, inode->i_ino, io_end0, io_end1);
	}
#endif
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
	struct workqueue_struct *wq;
	unsigned long flags;

	/* Only reserved conversions from writeback should enter here */
	WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
	WARN_ON(!io_end->handle && sbi->s_journal);
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	wq = sbi->rsv_conversion_wq;
	if (list_empty(&ei->i_rsv_conversion_list))
		queue_work(wq, &ei->i_rsv_conversion_work);
	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

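/*
 * Detach the completed io_end list from the inode under
 * i_completed_io_lock, then process each entry, converting unwritten
 * extents to written. Returns the first conversion error seen, if any.
 */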
static int ext4_do_flush_completed_IO(struct inode *inode,
				      struct list_head *head)
{
	ext4_io_end_t *io_end;
	struct list_head unwritten;
	unsigned long flags;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, ret = 0;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	dump_completed_IO(inode, head);
	list_replace_init(head, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&unwritten)) {
		io_end = list_entry(unwritten.next, ext4_io_end_t, list);
		BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
		list_del_init(&io_end->list);

		err = ext4_end_io_end(io_end);
		if (unlikely(!ret && err))
			ret = err;
	}
	return ret;
}

/*
 * Work on completed IO, to convert unwritten extents to written extents.
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
						  i_rsv_conversion_work);
	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

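/* Allocate and initialize an io_end with a single reference held. */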
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io_end = kmem_cache_zalloc(io_end_cachep, flags);

	if (io_end) {
		io_end->inode = inode;
		INIT_LIST_HEAD(&io_end->list);
		INIT_LIST_HEAD(&io_end->list_vec);
		atomic_set(&io_end->count, 1);
	}
	return io_end;
}

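/*
 * Drop a reference to an io_end. If this was the last reference and the
 * io_end still needs unwritten extent conversion, defer that work to
 * the reserved-conversion workqueue rather than doing it here, since
 * this can be called from bio completion (possibly interrupt) context.
 */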
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
	if (atomic_dec_and_test(&io_end->count)) {
		if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
		    list_empty(&io_end->list_vec)) {
			ext4_release_io_end(io_end);
			return;
		}
		ext4_add_complete_io(io_end);
	}
}

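/*
 * Drop a reference to an io_end, performing any needed unwritten extent
 * conversion synchronously in the caller's context.
 */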
int ext4_put_io_end(ext4_io_end_t *io_end)
{
	int err = 0;

	if (atomic_dec_and_test(&io_end->count)) {
		if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
			err = ext4_convert_unwritten_io_end_vec(io_end->handle,
								io_end);
			io_end->handle = NULL;
			ext4_clear_io_unwritten_flag(io_end);
		}
		ext4_release_io_end(io_end);
	}
	return err;
}

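/* Take an extra reference to an io_end. */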
ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
	atomic_inc(&io_end->count);
	return io_end;
}

/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio)
{
	ext4_io_end_t *io_end = bio->bi_private;
	sector_t bi_sector = bio->bi_iter.bi_sector;
	char b[BDEVNAME_SIZE];

	if (WARN_ONCE(!io_end, "io_end is NULL: %s: sector %Lu len %u err %d\n",
		      bio_devname(bio, b),
		      (long long) bio->bi_iter.bi_sector,
		      (unsigned) bio_sectors(bio),
		      bio->bi_status)) {
		ext4_finish_bio(bio);
		bio_put(bio);
		return;
	}
	bio->bi_end_io = NULL;

	if (bio->bi_status) {
		struct inode *inode = io_end->inode;

		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
			     "(starting block %llu)",
			     bio->bi_status, inode->i_ino,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
		mapping_set_error(inode->i_mapping,
				  blk_status_to_errno(bio->bi_status));
	}

	if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
		/*
		 * Link bio into list hanging from io_end. We have to do it
		 * atomically as bio completions can be racing against each
		 * other.
		 */
		bio->bi_private = xchg(&io_end->bio, bio);
		ext4_put_io_end_defer(io_end);
	} else {
		/*
		 * Drop io_end reference early. Inode can get freed once
		 * we finish the bio.
		 */
		ext4_put_io_end_defer(io_end);
		ext4_finish_bio(bio);
		bio_put(bio);
	}
}

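/* Submit the bio accumulated in @io, if any, and reset io->io_bio. */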
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
				  REQ_SYNC : 0;
		io->io_bio->bi_write_hint = io->io_end->inode->i_write_hint;
		bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
		submit_bio(io->io_bio);
	}
	io->io_bio = NULL;
}

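/* Prepare an ext4_io_submit context for a writeback pass. */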
void ext4_io_submit_init(struct ext4_io_submit *io,
			 struct writeback_control *wbc)
{
	io->io_wbc = wbc;
	io->io_bio = NULL;
	io->io_end = NULL;
}

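/*
 * Allocate a new writeback bio starting at @bh's block, point its
 * completion at ext4_end_bio(), and make it hold a reference to the
 * current io_end.
 */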
static void io_submit_init_bio(struct ext4_io_submit *io,
			       struct buffer_head *bh)
{
	struct bio *bio;

	/*
	 * bio_alloc will _always_ be able to allocate a bio if
	 * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
	 */
	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio_set_dev(bio, bh->b_bdev);
	bio->bi_end_io = ext4_end_bio;
	bio->bi_private = ext4_get_io_end(io->io_end);
	io->io_bio = bio;
	io->io_next_block = bh->b_blocknr;
	wbc_init_bio(io->io_wbc, bio);
}

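/*
 * Add a buffer to the current bio, first submitting the bio if the
 * buffer is not contiguous with it, cannot share its encryption
 * context, or does not fit because the bio is full.
 */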
static void io_submit_add_bh(struct ext4_io_submit *io,
			     struct inode *inode,
			     struct page *page,
			     struct buffer_head *bh)
{
	int ret;

	if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
			   !fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		io_submit_init_bio(io, bh);
		io->io_bio->bi_write_hint = inode->i_write_hint;
	}
	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
	if (ret != bh->b_size)
		goto submit_and_retry;
	wbc_account_cgroup_owner(io->io_wbc, page, bh->b_size);
	io->io_next_block++;
}

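/*
 * Write out the dirty, mapped buffers of a page. The page must be
 * locked and not yet under writeback; writeback state is set here and
 * ended from bio completion (or immediately, if nothing was submitted).
 * For files using fs-layer encryption, the data is first encrypted into
 * a bounce page and the bounce page is submitted instead.
 */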
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc,
			bool keep_towrite)
{
	struct page *bounce_page = NULL;
	struct inode *inode = page->mapping->host;
	unsigned block_start;
	struct buffer_head *bh, *head;
	int ret = 0;
	int nr_submitted = 0;
	int nr_to_submit = 0;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	if (keep_towrite)
		set_page_writeback_keepwrite(page);
	else
		set_page_writeback(page);
	ClearPageError(page);

	/*
	 * Comments copied from block_write_full_page:
	 *
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	/*
	 * In the first loop we prepare and mark buffers to submit. We have to
	 * mark all buffers in the page before submitting so that
	 * end_page_writeback() cannot be called from ext4_bio_end_io() when IO
	 * on the first buffer finishes and we are still working on submitting
	 * the second buffer.
	 */
	bh = head = page_buffers(page);
	do {
		block_start = bh_offset(bh);
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_dirty(bh) || buffer_delay(bh) ||
		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
			/* A hole? We can safely clear the dirty bit */
			if (!buffer_mapped(bh))
				clear_buffer_dirty(bh);
			if (io->io_bio)
				ext4_io_submit(io);
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		set_buffer_async_write(bh);
		nr_to_submit++;
	} while ((bh = bh->b_this_page) != head);

	bh = head = page_buffers(page);

	/*
	 * If any blocks are being written to an encrypted file, encrypt them
	 * into a bounce page. For simplicity, just encrypt until the last
	 * block which might be needed. This may cause some unneeded blocks
	 * (e.g. holes) to be unnecessarily encrypted, but this is rare and
	 * can't happen in the common case of blocksize == PAGE_SIZE.
	 */
	if (fscrypt_inode_uses_fs_layer_crypto(inode) && nr_to_submit) {
		gfp_t gfp_flags = GFP_NOFS;
		unsigned int enc_bytes = round_up(len, i_blocksize(inode));

		/*
		 * Since bounce page allocation uses a mempool, we can only use
		 * a waiting mask (i.e. request guaranteed allocation) on the
		 * first page of the bio. Otherwise it can deadlock.
		 */
		if (io->io_bio)
			gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
	retry_encrypt:
		bounce_page = fscrypt_encrypt_pagecache_blocks(page, enc_bytes,
							       0, gfp_flags);
		if (IS_ERR(bounce_page)) {
			ret = PTR_ERR(bounce_page);
			if (ret == -ENOMEM &&
			    (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
				gfp_flags = GFP_NOFS;
				if (io->io_bio)
					ext4_io_submit(io);
				else
					gfp_flags |= __GFP_NOFAIL;
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry_encrypt;
			}

			printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
			redirty_page_for_writepage(wbc, page);
			do {
				clear_buffer_async_write(bh);
				bh = bh->b_this_page;
			} while (bh != head);
			goto unlock;
		}
	}

	/* Now submit buffers to write */
	do {
		if (!buffer_async_write(bh))
			continue;
		io_submit_add_bh(io, inode,
				 bounce_page ? bounce_page : page, bh);
		nr_submitted++;
		clear_buffer_dirty(bh);
	} while ((bh = bh->b_this_page) != head);

unlock:
	unlock_page(page);
	/* Nothing submitted - we have to end page writeback */
	if (!nr_submitted)
		end_page_writeback(page);
	return ret;
}