// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

/*
 * Structure allocated for each page or THP when block size < page size
 * to track sub-page uptodate status and I/O completions.
 */
struct iomap_page {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		uptodate_lock;
	unsigned long		uptodate[];
};
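
/*
 * Illustrative example (editor's note, not from the original file): on a
 * 4KiB page backed by 1KiB blocks, i_blocks_per_page() is 4, so ->uptodate
 * needs four bits.  A read that only completes blocks 0 and 1 sets bits 0-1;
 * the page itself is not marked PageUptodate until all four bits are set.
 */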

static inline struct iomap_page *to_iomap_page(struct page *page)
{
	/*
	 * per-block data is stored in the head page.  Callers should
	 * not be dealing with tail pages, and if they are, they can
	 * call thp_head() first.
	 */
	VM_BUG_ON_PGFLAGS(PageTail(page), page);

	if (page_has_private(page))
		return (struct iomap_page *)page_private(page);
	return NULL;
}

static struct bio_set iomap_ioend_bioset;

static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);
	unsigned int nr_blocks = i_blocks_per_page(inode, page);

	if (iop || nr_blocks <= 1)
		return iop;

	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
			GFP_NOFS | __GFP_NOFAIL);
	spin_lock_init(&iop->uptodate_lock);
	if (PageUptodate(page))
		bitmap_fill(iop->uptodate, nr_blocks);
	attach_page_private(page, iop);
	return iop;
}

static void
iomap_page_release(struct page *page)
{
	struct iomap_page *iop = detach_page_private(page);
	unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
			PageUptodate(page));
	kfree(iop);
}

/*
 * Calculate the range inside the page that we actually need to read.
 */
static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	unsigned poff = offset_in_page(*pos);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_page(isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}
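
/*
 * Worked example (editor's illustration): for a 4KiB page with 1KiB blocks
 * where blocks 0 and 3 are already uptodate, a read of the whole page is
 * trimmed by the leading loop past block 0 and by the trailing loop before
 * block 3, leaving *offp == 1024 and *lenp == 2048, so only blocks 1 and 2
 * are actually read from disk.
 */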

static void
iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned long flags;

	spin_lock_irqsave(&iop->uptodate_lock, flags);
	bitmap_set(iop->uptodate, first, last - first + 1);
	if (bitmap_full(iop->uptodate, i_blocks_per_page(inode, page)))
		SetPageUptodate(page);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}

static void
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	if (PageError(page))
		return;

	if (page_has_private(page))
		iomap_iop_set_range_uptodate(page, off, len);
	else
		SetPageUptodate(page);
}

static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct iomap_page *iop = to_iomap_page(page);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
	}

	if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
		unlock_page(page);
}

static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		iomap_read_page_end_io(bvec, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct page		*cur_page;
	bool			cur_page_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

static loff_t iomap_read_inline_data(struct iomap_iter *iter,
		struct page *page)
{
	struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t poff = offset_in_page(iomap->offset);
	void *addr;

	if (PageUptodate(page))
		return PAGE_SIZE - poff;

	if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
		return -EIO;
	if (WARN_ON_ONCE(size > PAGE_SIZE -
			offset_in_page(iomap->inline_data)))
		return -EIO;
	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (poff > 0)
		iomap_page_create(iter->inode, page);

	addr = kmap_local_page(page) + poff;
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - poff - size);
	kunmap_local(addr);
	iomap_set_range_uptodate(page, poff, PAGE_SIZE - poff);
	return PAGE_SIZE - poff;
}

static inline bool iomap_block_needs_zeroing(struct iomap_iter *iter,
		loff_t pos)
{
	struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}

static loff_t iomap_readpage_iter(struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx, loff_t offset)
{
	struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos + offset;
	loff_t length = iomap_length(iter) - offset;
	struct page *page = ctx->cur_page;
	struct iomap_page *iop;
	loff_t orig_pos = pos;
	unsigned poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE)
		return min(iomap_read_inline_data(iter, page), length);

	/* zero post-eof blocks as the page may be mapped */
	iop = iomap_page_create(iter->inode, page);
	iomap_adjust_read_range(iter->inode, iop, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(iter, pos)) {
		zero_user(page, poff, plen);
		iomap_set_range_uptodate(page, poff, plen);
		goto done;
	}

	ctx->cur_page_in_bio = true;
	if (iop)
		atomic_add(plen, &iop->read_bytes_pending);

	sector = iomap_sector(iomap, pos);
	if (!ctx->bio ||
	    bio_end_sector(ctx->bio) != sector ||
	    bio_add_page(ctx->bio, page, plen, poff) != plen) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, bio_max_segs(nr_vecs));
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_readpage does.
		 */
		if (!ctx->bio)
			ctx->bio = bio_alloc(orig_gfp, 1);
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
		__bio_add_page(ctx->bio, page, plen, poff);
	}
done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that, we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= page->mapping->host,
		.pos		= page_offset(page),
		.len		= PAGE_SIZE,
	};
	struct iomap_readpage_ctx ctx = {
		.cur_page	= page,
	};
	int ret;

	trace_iomap_readpage(page->mapping->host, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_readpage_iter(&iter, &ctx, 0);

	if (ret < 0)
		SetPageError(page);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readahead and block_read_full_page, we always
	 * return 0 and just mark the page as PageError on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);
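
/*
 * Typical wiring (editor's sketch, not part of this file): a filesystem
 * implements its ->readpage address_space operation by handing its own
 * mapping ops to iomap_readpage(), e.g.
 *
 *	static int example_read_page(struct file *unused, struct page *page)
 *	{
 *		return iomap_readpage(page, &example_iomap_ops);
 *	}
 *
 * where "example_iomap_ops" is a placeholder for the filesystem's read
 * iomap_ops.
 */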

static loff_t iomap_readahead_iter(struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	loff_t length = iomap_length(iter);
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_page && offset_in_page(iter->pos + done) == 0) {
			if (!ctx->cur_page_in_bio)
				unlock_page(ctx->cur_page);
			put_page(ctx->cur_page);
			ctx->cur_page = NULL;
		}
		if (!ctx->cur_page) {
			ctx->cur_page = readahead_page(ctx->rac);
			ctx->cur_page_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx, done);
	}

	return done;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.processed = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_page) {
		if (!ctx.cur_page_in_bio)
			unlock_page(ctx.cur_page);
		put_page(ctx.cur_page);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);
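
/*
 * Matching ->readahead hook (editor's sketch, placeholder names):
 *
 *	static void example_readahead(struct readahead_control *rac)
 *	{
 *		iomap_readahead(rac, &example_iomap_ops);
 *	}
 */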

/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to a file portion
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	trace_iomap_releasepage(page->mapping->host, page_offset(page),
			PAGE_SIZE);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared.  Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(); skip those here.
	 */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	iomap_page_release(page);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
	trace_iomap_invalidatepage(page->mapping->host, offset, len);

	/*
	 * If we're invalidating the entire page, clear the dirty state from it
	 * and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == PAGE_SIZE) {
		WARN_ON_ONCE(PageWriteback(page));
		cancel_dirty_page(page);
		iomap_page_release(page);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page))
		attach_page_private(newpage, detach_page_private(page));

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */

enum {
	IOMAP_WRITE_F_UNSHARE		= (1 << 0),
};

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
		unsigned plen, const struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}

static int __iomap_write_begin(struct iomap_iter *iter, loff_t pos,
		unsigned len, int flags, struct page *page)
{
	struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct iomap_page *iop = iomap_page_create(iter->inode, page);
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	unsigned from = offset_in_page(pos), to = from + len, poff, plen;

	if (PageUptodate(page))
		return 0;
	ClearPageError(page);

	do {
		iomap_adjust_read_range(iter->inode, iop, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(flags & IOMAP_WRITE_F_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE))
				return -EIO;
			zero_user_segments(page, poff, from, to, poff + plen);
		} else {
			int status = iomap_read_page_sync(block_start, page,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(page, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static int iomap_write_begin_inline(struct iomap_iter *iter,
		struct page *page)
{
	int ret;

	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	ret = iomap_read_inline_data(iter, page);
	if (ret < 0)
		return ret;
	return 0;
}

static int iomap_write_begin(struct iomap_iter *iter, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep)
{
	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
	struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(iter->inode, pos, len);
		if (status)
			return status;
	}

	page = grab_cache_page_write_begin(iter->inode->i_mapping,
				pos >> PAGE_SHIFT, AOP_FLAG_NOFS);
	if (!page) {
		status = -ENOMEM;
		goto out_no_page;
	}

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, page);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(page, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, pos, len, flags, page);

	if (unlikely(status))
		goto out_unlock;

	*pagep = page;
	return 0;

out_unlock:
	unlock_page(page);
	put_page(page);
	iomap_write_failed(iter->inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(iter->inode, pos, 0, NULL);
	return status;
}

static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct page *page)
{
	flush_dcache_page(page);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !PageUptodate(page)))
		return 0;
	iomap_set_range_uptodate(page, offset_in_page(pos), len);
	__set_page_dirty_nobuffers(page);
	return copied;
}

static size_t iomap_write_end_inline(struct iomap_iter *iter, struct page *page,
		loff_t pos, size_t copied)
{
	struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(!iomap_inline_data_valid(iomap));

	flush_dcache_page(page);
	addr = kmap_local_page(page) + pos;
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
	return copied;
}

/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct page *page)
{
	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
	struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t old_size = iter->inode->i_size;
	size_t ret;

	if (srcmap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(iter, page, pos, copied);
	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
				copied, page, NULL);
	} else {
		ret = __iomap_write_end(iter->inode, pos, len, copied, page);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(iter->inode, pos + ret);
		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
	}
	unlock_page(page);

	if (old_size < pos)
		pagecache_isize_extended(iter->inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(iter->inode, pos, ret, page);
	put_page(page);

	if (ret < len)
		iomap_write_failed(iter->inode, pos, len);
	return ret;
}

static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	ssize_t written = 0;
	long status = 0;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, pos, bytes, 0, &page);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(iter->inode->i_mapping))
			flush_dcache_page(page);

		copied = copy_page_from_iter_atomic(page, offset, bytes, i);

		status = iomap_write_end(iter, pos, bytes, copied, page);

		if (unlikely(copied != status))
			iov_iter_revert(i, copied - status);

		cond_resched();
		if (unlikely(status == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (copied)
				bytes = copied;
			goto again;
		}
		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(i),
		.flags		= IOMAP_WRITE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_write_iter(&iter, i);
	if (iter.pos == iocb->ki_pos)
		return ret;
	return iter.pos - iocb->ki_pos;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
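
/*
 * Typical caller (editor's sketch, assuming a filesystem-specific
 * ->write_iter): the filesystem takes the inode lock, runs its own write
 * checks, and then hands the iov_iter over with its iomap_ops, e.g.
 *
 *	ret = iomap_file_buffered_write(iocb, from, &example_iomap_ops);
 *
 * The return value is the number of bytes written into the page cache, or a
 * negative errno if no progress was made; updating iocb->ki_pos is left to
 * the caller.
 */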

static loff_t iomap_unshare_iter(struct iomap_iter *iter)
{
	struct iomap *iomap = &iter->iomap;
	struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	long status = 0;
	loff_t written = 0;

	/* don't bother with blocks that are not shared to start with */
	if (!(iomap->flags & IOMAP_F_SHARED))
		return length;
	/* don't bother with holes or unwritten extents */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		unsigned long offset = offset_in_page(pos);
		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
		struct page *page;

		status = iomap_write_begin(iter, pos, bytes,
				IOMAP_WRITE_F_UNSHARE, &page);
		if (unlikely(status))
			return status;

		status = iomap_write_end(iter, pos, bytes, bytes, page);
		if (WARN_ON_ONCE(status == 0))
			return -EIO;

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_WRITE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_unshare_iter(&iter);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);

static s64 __iomap_zero_iter(struct iomap_iter *iter, loff_t pos, u64 length)
{
	struct page *page;
	int status;
	unsigned offset = offset_in_page(pos);
	unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);

	status = iomap_write_begin(iter, pos, bytes, 0, &page);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(iter, pos, bytes, bytes, page);
}

static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	struct iomap *iomap = &iter->iomap;
	struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	loff_t written = 0;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		s64 bytes;

		if (IS_DAX(iter->inode))
			bytes = dax_iomap_zero(pos, length, iomap);
		else
			bytes = __iomap_zero_iter(iter, pos, length);
		if (bytes < 0)
			return bytes;

		pos += bytes;
		length -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (length > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_ZERO,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_zero_iter(&iter, did_zero);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
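
/*
 * Example use (editor's sketch): when a filesystem extends i_size past a
 * partially written block, it can zero the newly exposed range in the page
 * cache with something like
 *
 *	error = iomap_zero_range(inode, old_isize, new_isize - old_isize,
 *			NULL, &example_iomap_ops);
 */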

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
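
/*
 * Worked example (editor's illustration): with a 4096-byte block size and
 * pos == 6000, off == 6000 & 4095 == 1904, so the call above zeroes the
 * 2192 bytes from offset 6000 up to the block boundary at 8192.
 */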

static loff_t iomap_page_mkwrite_iter(struct iomap_iter *iter,
		struct page *page)
{
	loff_t length = iomap_length(iter);
	int ret;

	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(page, iter->pos, length, NULL,
					      &iter->iomap);
		if (ret)
			return ret;
		block_commit_write(page, 0, length);
	} else {
		WARN_ON_ONCE(!PageUptodate(page));
		set_page_dirty(page);
	}

	return length;
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= file_inode(vmf->vma->vm_file),
		.flags		= IOMAP_WRITE | IOMAP_FAULT,
	};
	struct page *page = vmf->page;
	ssize_t ret;

	lock_page(page);
	ret = page_mkwrite_check_truncate(page, iter.inode);
	if (ret < 0)
		goto out_unlock;
	iter.pos = page_offset(page);
	iter.len = ret;
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_page_mkwrite_iter(&iter, page);

	if (ret < 0)
		goto out_unlock;
	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
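
/*
 * Typical wiring (editor's sketch, placeholder names): a filesystem's
 * vm_operations_struct ->page_mkwrite handler takes any locks it needs and
 * then calls
 *
 *	return iomap_page_mkwrite(vmf, &example_iomap_ops);
 *
 * to allocate blocks for the faulting page and mark it dirty.
 */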

static void
iomap_finish_page_writeback(struct inode *inode, struct page *page,
		int error, unsigned int len)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (error) {
		SetPageError(page);
		mapping_set_error(inode->i_mapping, error);
	}

	WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);

	if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
		end_page_writeback(page);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
static void
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
	struct inode *inode = ioend->io_inode;
	struct bio *bio = &ioend->io_inline_bio;
	struct bio *last = ioend->io_bio, *next;
	u64 start = bio->bi_iter.bi_sector;
	loff_t offset = ioend->io_offset;
	bool quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bv, bio, iter_all)
			iomap_finish_page_writeback(inode, bv->bv_page, error,
					bv->bv_len);
		bio_put(bio);
	}
	/* The ioend has been freed by bio_put() */

	if (unlikely(error && !quiet)) {
		printk_ratelimited(KERN_ERR
"%s: writeback error on inode %lu, offset %lld, sector %llu",
			inode->i_sb->s_id, inode->i_ino, offset, start);
	}
}

void
iomap_finish_ioends(struct iomap_ioend *ioend, int error)
{
	struct list_head tmp;

	list_replace_init(&ioend->io_list, &tmp);
	iomap_finish_ioend(ioend, error);

	while (!list_empty(&tmp)) {
		ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
		list_del_init(&ioend->io_list);
		iomap_finish_ioend(ioend, error);
	}
}
EXPORT_SYMBOL_GPL(iomap_finish_ioends);

/*
 * We can merge two adjacent ioends if they have the same set of work to do.
 */
static bool
iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
{
	if (ioend->io_bio->bi_status != next->io_bio->bi_status)
		return false;
	if ((ioend->io_flags & IOMAP_F_SHARED) ^
	    (next->io_flags & IOMAP_F_SHARED))
		return false;
	if ((ioend->io_type == IOMAP_UNWRITTEN) ^
	    (next->io_type == IOMAP_UNWRITTEN))
		return false;
	if (ioend->io_offset + ioend->io_size != next->io_offset)
		return false;
	return true;
}

void
iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
{
	struct iomap_ioend *next;

	INIT_LIST_HEAD(&ioend->io_list);

	while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
			io_list))) {
		if (!iomap_ioend_can_merge(ioend, next))
			break;
		list_move_tail(&next->io_list, &ioend->io_list);
		ioend->io_size += next->io_size;
	}
}
EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
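
/*
 * Completion-side usage (editor's sketch, not part of this file): a
 * filesystem that defers ioend completion to a workqueue typically collects
 * completed ioends on a list, orders them by file offset with
 * iomap_sort_ioends(), merges neighbours with iomap_ioend_try_merge(), and
 * finally calls iomap_finish_ioends() on each remaining ioend.
 */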
1119
1120static int
Sami Tolvanen4f0f5862021-04-08 11:28:34 -07001121iomap_ioend_compare(void *priv, const struct list_head *a,
1122 const struct list_head *b)
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001123{
Christoph Hellwigb3d423e2019-10-17 13:12:20 -07001124 struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
1125 struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001126
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001127 if (ia->io_offset < ib->io_offset)
1128 return -1;
Christoph Hellwigb3d423e2019-10-17 13:12:20 -07001129 if (ia->io_offset > ib->io_offset)
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001130 return 1;
1131 return 0;
1132}
1133
1134void
1135iomap_sort_ioends(struct list_head *ioend_list)
1136{
1137 list_sort(NULL, ioend_list, iomap_ioend_compare);
1138}
1139EXPORT_SYMBOL_GPL(iomap_sort_ioends);
1140
1141static void iomap_writepage_end_bio(struct bio *bio)
1142{
1143 struct iomap_ioend *ioend = bio->bi_private;
1144
1145 iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
1146}
1147
1148/*
1149 * Submit the final bio for an ioend.
1150 *
1151 * If @error is non-zero, it means that we have a situation where some part of
Andreas Gruenbacherf1f264b2021-08-02 14:46:31 -07001152 * the submission process has failed after we've marked pages for writeback
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001153 * and unlocked them. In this situation, we need to fail the bio instead of
1154 * submitting it. This typically only happens on a filesystem shutdown.
1155 */
1156static int
1157iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
1158 int error)
1159{
1160 ioend->io_bio->bi_private = ioend;
1161 ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
1162
1163 if (wpc->ops->prepare_ioend)
1164 error = wpc->ops->prepare_ioend(ioend, error);
1165 if (error) {
1166 /*
Andreas Gruenbacherf1f264b2021-08-02 14:46:31 -07001167 * If we're failing the IO now, just mark the ioend with an
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001168 * error and finish it. This will run IO completion immediately
1169 * as there is only one reference to the ioend at this point in
1170 * time.
1171 */
1172 ioend->io_bio->bi_status = errno_to_blk_status(error);
1173 bio_endio(ioend->io_bio);
1174 return error;
1175 }
1176
1177 submit_bio(ioend->io_bio);
1178 return 0;
1179}
1180
1181static struct iomap_ioend *
1182iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
1183 loff_t offset, sector_t sector, struct writeback_control *wbc)
1184{
1185 struct iomap_ioend *ioend;
1186 struct bio *bio;
1187
Christoph Hellwiga8affc02021-03-11 12:01:37 +01001188 bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &iomap_ioend_bioset);
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001189 bio_set_dev(bio, wpc->iomap.bdev);
1190 bio->bi_iter.bi_sector = sector;
1191 bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
1192 bio->bi_write_hint = inode->i_write_hint;
1193 wbc_init_bio(wbc, bio);
1194
1195 ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
1196 INIT_LIST_HEAD(&ioend->io_list);
1197 ioend->io_type = wpc->iomap.type;
1198 ioend->io_flags = wpc->iomap.flags;
1199 ioend->io_inode = inode;
1200 ioend->io_size = 0;
1201 ioend->io_offset = offset;
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001202 ioend->io_bio = bio;
1203 return ioend;
1204}
1205
1206/*
1207 * Allocate a new bio, and chain the old bio to the new one.
1208 *
Andreas Gruenbacherf1f264b2021-08-02 14:46:31 -07001209 * Note that we have to perform the chaining in this unintuitive order
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001210 * so that the bi_private linkage is set up in the right direction for the
1211 * traversal in iomap_finish_ioend().
1212 */
1213static struct bio *
1214iomap_chain_bio(struct bio *prev)
1215{
1216 struct bio *new;
1217
Christoph Hellwiga8affc02021-03-11 12:01:37 +01001218 new = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001219 bio_copy_dev(new, prev);/* also copies over blkcg information */
1220 new->bi_iter.bi_sector = bio_end_sector(prev);
1221 new->bi_opf = prev->bi_opf;
1222 new->bi_write_hint = prev->bi_write_hint;
1223
1224 bio_chain(prev, new);
1225 bio_get(prev); /* for iomap_finish_ioend */
1226 submit_bio(prev);
1227 return new;
1228}
1229
1230static bool
1231iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
1232 sector_t sector)
1233{
1234 if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
1235 (wpc->ioend->io_flags & IOMAP_F_SHARED))
1236 return false;
1237 if (wpc->iomap.type != wpc->ioend->io_type)
1238 return false;
1239 if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
1240 return false;
1241 if (sector != bio_end_sector(wpc->ioend->io_bio))
1242 return false;
1243 return true;
1244}
1245
1246/*
1247 * Test to see if we have an existing ioend structure that we could append to
Andreas Gruenbacherf1f264b2021-08-02 14:46:31 -07001248 * first; otherwise finish off the current ioend and start another.
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001249 */
1250static void
1251iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
1252 struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
1253 struct writeback_control *wbc, struct list_head *iolist)
1254{
1255 sector_t sector = iomap_sector(&wpc->iomap, offset);
1256 unsigned len = i_blocksize(inode);
1257 unsigned poff = offset & (PAGE_SIZE - 1);
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001258
1259 if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
1260 if (wpc->ioend)
1261 list_add(&wpc->ioend->io_list, iolist);
1262 wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
1263 }
1264
Christoph Hellwigc1b79f12021-08-02 14:43:43 -07001265 if (bio_add_page(wpc->ioend->io_bio, page, len, poff) != len) {
1266 wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
1267 __bio_add_page(wpc->ioend->io_bio, page, len, poff);
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001268 }
1269
Christoph Hellwigc1b79f12021-08-02 14:43:43 -07001270 if (iop)
1271 atomic_add(len, &iop->write_bytes_pending);
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001272 wpc->ioend->io_size += len;
1273 wbc_account_cgroup_owner(wbc, page, len);
1274}
1275
1276/*
1277 * We implement an immediate ioend submission policy here to avoid needing to
1278 * chain multiple ioends and hence nest mempool allocations which can violate
Andreas Gruenbacherf1f264b2021-08-02 14:46:31 -07001279 * the forward progress guarantees we need to provide. The current ioend we're
1280 * adding blocks to is cached in the writepage context, and if the new block
1281 * doesn't append to the cached ioend, it will create a new ioend and cache that
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001282 * instead.
1283 *
1284 * If a new ioend is created and cached, the old ioend is returned and queued
1285 * locally for submission once the entire page is processed or an error has been
1286 * detected. While ioends are submitted as soon as they are fully built,
1287 * batching optimisations are provided by higher level block plugging.
1288 *
1289 * At the end of a writeback pass, there will be a cached ioend remaining on the
1290 * writepage context that the caller will need to submit.
1291 */
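/*
 * For example (illustrative only): a page whose dirty blocks map to two
 * separate extents produces two ioends below; the first is queued on
 * submit_list the moment the second is allocated and is submitted at the end
 * of iomap_writepage_map(), while the second stays cached in wpc->ioend for
 * iomap_writepage()/iomap_writepages() to submit later.
 */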
1292static int
1293iomap_writepage_map(struct iomap_writepage_ctx *wpc,
1294 struct writeback_control *wbc, struct inode *inode,
1295 struct page *page, u64 end_offset)
1296{
Andreas Gruenbacher8e1bcef2021-07-15 09:58:05 -07001297 struct iomap_page *iop = iomap_page_create(inode, page);
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001298 struct iomap_ioend *ioend, *next;
1299 unsigned len = i_blocksize(inode);
1300 u64 file_offset; /* file offset of page */
1301 int error = 0, count = 0, i;
1302 LIST_HEAD(submit_list);
1303
Matthew Wilcox (Oracle)0fb2d722020-09-21 08:58:41 -07001304 WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001305
1306 /*
1307 * Walk through the page to find areas to write back. If we run off the
1308 * end of the current map or find the current map invalid, grab a new
1309 * one.
1310 */
1311 for (i = 0, file_offset = page_offset(page);
1312 i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
1313 i++, file_offset += len) {
1314 if (iop && !test_bit(i, iop->uptodate))
1315 continue;
1316
1317 error = wpc->ops->map_blocks(wpc, inode, file_offset);
1318 if (error)
1319 break;
Christoph Hellwig3e19e6f2019-10-17 13:12:17 -07001320 if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
1321 continue;
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001322 if (wpc->iomap.type == IOMAP_HOLE)
1323 continue;
1324 iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
1325 &submit_list);
1326 count++;
1327 }
1328
1329 WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
1330 WARN_ON_ONCE(!PageLocked(page));
1331 WARN_ON_ONCE(PageWriteback(page));
Brian Foster50e7d6c2020-10-29 14:30:49 -07001332 WARN_ON_ONCE(PageDirty(page));
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001333
1334 /*
1335 * We cannot cancel the ioend directly here on error. We may have
1336 * already set other pages under writeback and hence we have to run I/O
1337 * completion to mark the error state of the pages under writeback
1338 * appropriately.
1339 */
1340 if (unlikely(error)) {
Brian Foster763e4cd2020-10-29 14:30:48 -07001341 /*
1342 * Let the filesystem know what portion of the current page
Andreas Gruenbacherf1f264b2021-08-02 14:46:31 -07001343 * failed to map. If the page hasn't been added to the ioend, it
Brian Foster763e4cd2020-10-29 14:30:48 -07001344 * won't be affected by I/O completion and we must unlock it
1345 * now.
1346 */
1347 if (wpc->ops->discard_page)
1348 wpc->ops->discard_page(page, file_offset);
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001349 if (!count) {
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001350 ClearPageUptodate(page);
1351 unlock_page(page);
1352 goto done;
1353 }
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001354 }
1355
Brian Foster50e7d6c2020-10-29 14:30:49 -07001356 set_page_writeback(page);
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001357 unlock_page(page);
1358
1359 /*
Andreas Gruenbacherf1f264b2021-08-02 14:46:31 -07001360 * Preserve the original error if there was one; catch
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001361 * submission errors here and propagate into subsequent ioend
1362 * submissions.
1363 */
1364 list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
1365 int error2;
1366
1367 list_del_init(&ioend->io_list);
1368 error2 = iomap_submit_ioend(wpc, ioend, error);
1369 if (error2 && !error)
1370 error = error2;
1371 }
1372
1373 /*
1374 * We can end up here with no error and nothing to write only if we race
1375 * with a partial page truncate on a sub-page block sized filesystem.
1376 */
1377 if (!count)
1378 end_page_writeback(page);
1379done:
1380 mapping_set_error(page->mapping, error);
1381 return error;
1382}
1383
1384/*
1385 * Write out a dirty page.
1386 *
Andreas Gruenbacherf1f264b2021-08-02 14:46:31 -07001387 * For delalloc space on the page, we need to allocate space and flush it.
1388 * For unwritten space on the page, we need to start the conversion to
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001389 * regular allocated space.
1390 */
1391static int
1392iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
1393{
1394 struct iomap_writepage_ctx *wpc = data;
1395 struct inode *inode = page->mapping->host;
1396 pgoff_t end_index;
1397 u64 end_offset;
1398 loff_t offset;
1399
Matthew Wilcox (Oracle)1ac99452020-03-05 07:21:43 -08001400 trace_iomap_writepage(inode, page_offset(page), PAGE_SIZE);
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001401
1402 /*
Andreas Gruenbacherf1f264b2021-08-02 14:46:31 -07001403 * Refuse to write the page out if we're called from reclaim context.
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001404 *
1405 * This avoids stack overflows when called from deep call stacks in
1406 * random direct reclaim or memcg reclaim callers. We explicitly
1407 * allow reclaim from kswapd as the stack usage there is relatively low.
1408 *
1409 * This should never happen except in the case of a VM regression so
1410 * warn about it.
1411 */
1412 if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
1413 PF_MEMALLOC))
1414 goto redirty;
1415
1416 /*
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001417 * Is this page beyond the end of the file?
1418 *
1419 * If the page index is less than end_index, adjust the end_offset
1420 * to the highest offset that this page should represent.
1421 * -----------------------------------------------------
1422 * | file mapping | <EOF> |
1423 * -----------------------------------------------------
1424 * | Page ... | Page N-2 | Page N-1 | Page N | |
1425 * ^--------------------------------^----------|--------
1426 * | desired writeback range | see else |
1427 * ---------------------------------^------------------|
1428 */
1429 offset = i_size_read(inode);
1430 end_index = offset >> PAGE_SHIFT;
1431 if (page->index < end_index)
1432 end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT;
1433 else {
1434 /*
1435 * Check whether the page to write out is beyond or straddles
1436 * i_size or not.
1437 * -------------------------------------------------------
1438 * | file mapping | <EOF> |
1439 * -------------------------------------------------------
1440 * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
1441 * ^--------------------------------^-----------|---------
1442 * | | Straddles |
1443 * ---------------------------------^-----------|--------|
1444 */
1445 unsigned offset_into_page = offset & (PAGE_SIZE - 1);
1446
1447 /*
Andreas Gruenbacherf1f264b2021-08-02 14:46:31 -07001448 * Skip the page if it's fully outside i_size, e.g. due to a
1449 * truncate operation that's in progress. We must redirty the
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001450 * page so that reclaim stops reclaiming it. Otherwise
1451 * iomap_vm_releasepage() is called on it and gets confused.
1452 *
Andreas Gruenbacherf1f264b2021-08-02 14:46:31 -07001453 * Note that the end_index is unsigned long. If the given
1454 * offset is greater than 16TB on a 32-bit system then if we
1455 * checked if the page is fully outside i_size with
1456 * "if (page->index >= end_index + 1)", "end_index + 1" would
1457 * overflow and evaluate to 0. Hence this page would be
1458 * redirtied and written out repeatedly, which would result in
1459 * an infinite loop; the user program performing this operation
1460 * would hang. Instead, we can detect this situation by
1461 * checking if the page is totally beyond i_size or if its
Christoph Hellwig598ecfb2019-10-17 13:12:15 -07001462 * offset is just equal to the EOF.
1463 */
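		/*
		 * Worked example (illustrative): with 4 KiB pages and a
		 * 32-bit unsigned long, an i_size of 0xFFFFFFFF000 (just
		 * under 16TB) gives end_index == 0xffffffff, so a naive
		 * "page->index >= end_index + 1" would compare against 0,
		 * succeed for every page and cause the endless redirtying
		 * described above.
		 */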
1464 if (page->index > end_index ||
1465 (page->index == end_index && offset_into_page == 0))
1466 goto redirty;
1467
1468 /*
1469 * The page straddles i_size. It must be zeroed out on each
1470 * and every writepage invocation because it may be mmapped.
1471 * "A file is mapped in multiples of the page size. For a file
1472 * that is not a multiple of the page size, the remaining
1473 * memory is zeroed when mapped, and writes to that region are
1474 * not written out to the file."
1475 */
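		/*
		 * Numeric example (illustrative): with 4 KiB pages and
		 * i_size == 10000, the straddling page has index 2 and
		 * offset_into_page == 1808, so bytes 1808..4095 are zeroed
		 * here and end_offset below becomes 10000.
		 */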
1476 zero_user_segment(page, offset_into_page, PAGE_SIZE);
1477
1478 /* Adjust the end_offset to the end of file */
1479 end_offset = offset;
1480 }
1481
1482 return iomap_writepage_map(wpc, wbc, inode, page, end_offset);
1483
1484redirty:
1485 redirty_page_for_writepage(wbc, page);
1486 unlock_page(page);
1487 return 0;
1488}
1489
1490int
1491iomap_writepage(struct page *page, struct writeback_control *wbc,
1492 struct iomap_writepage_ctx *wpc,
1493 const struct iomap_writeback_ops *ops)
1494{
1495 int ret;
1496
1497 wpc->ops = ops;
1498 ret = iomap_do_writepage(page, wbc, wpc);
1499 if (!wpc->ioend)
1500 return ret;
1501 return iomap_submit_ioend(wpc, wpc->ioend, ret);
1502}
1503EXPORT_SYMBOL_GPL(iomap_writepage);
1504
1505int
1506iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
1507 struct iomap_writepage_ctx *wpc,
1508 const struct iomap_writeback_ops *ops)
1509{
1510 int ret;
1511
1512 wpc->ops = ops;
1513 ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
1514 if (!wpc->ioend)
1515 return ret;
1516 return iomap_submit_ioend(wpc, wpc->ioend, ret);
1517}
1518EXPORT_SYMBOL_GPL(iomap_writepages);
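
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * filesystem typically declares its own iomap_writeback_ops, stack-allocates
 * or embeds a struct iomap_writepage_ctx, and wires the helpers above into
 * its address_space_operations.  The names myfs_map_blocks,
 * myfs_writeback_ops and myfs_vm_writepages below are hypothetical;
 * ->map_blocks is the only mandatory callback, ->prepare_ioend and
 * ->discard_page are optional.
 *
 *	static const struct iomap_writeback_ops myfs_writeback_ops = {
 *		.map_blocks	= myfs_map_blocks,
 *	};
 *
 *	static int
 *	myfs_vm_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = { };
 *
 *		return iomap_writepages(mapping, wbc, &wpc,
 *				&myfs_writeback_ops);
 *	}
 */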
1519
1520static int __init iomap_init(void)
1521{
1522 return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
1523 offsetof(struct iomap_ioend, io_inline_bio),
1524 BIOSET_NEED_BVECS);
1525}
1526fs_initcall(iomap_init);