// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

/*
 * Structure allocated for each page or THP when block size < page size
 * to track sub-page uptodate status and I/O completions.
 */
struct iomap_page {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		uptodate_lock;
	unsigned long		uptodate[];
};

static inline struct iomap_page *to_iomap_page(struct page *page)
{
	/*
	 * per-block data is stored in the head page.  Callers should
	 * not be dealing with tail pages (and if they are, they can
	 * call thp_head() first).
	 */
	VM_BUG_ON_PGFLAGS(PageTail(page), page);

	if (page_has_private(page))
		return (struct iomap_page *)page_private(page);
	return NULL;
}
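
/*
 * Worked example (illustration only, not part of the kernel sources): with
 * a 4096-byte page and 1024-byte filesystem blocks, i_blocks_per_page() is
 * 4, so iop->uptodate uses bits 0-3, where bit i covers page bytes
 * [i * 1024, (i + 1) * 1024).  A page that has only had its second block
 * read so far has just bit 1 set, and the page itself does not become
 * PageUptodate until all four bits are set.
 */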

static struct bio_set iomap_ioend_bioset;

static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);
	unsigned int nr_blocks = i_blocks_per_page(inode, page);

	if (iop || nr_blocks <= 1)
		return iop;

	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
			GFP_NOFS | __GFP_NOFAIL);
	spin_lock_init(&iop->uptodate_lock);
	if (PageUptodate(page))
		bitmap_fill(iop->uptodate, nr_blocks);
	attach_page_private(page, iop);
	return iop;
}

static void
iomap_page_release(struct page *page)
{
	struct iomap_page *iop = detach_page_private(page);
	unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
			PageUptodate(page));
	kfree(iop);
}

/*
 * Calculate the range inside the page that we actually need to read.
 */
static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	unsigned poff = offset_in_page(*pos);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_page(isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}
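
/*
 * Worked example (illustration only): for a 4096-byte page with 1024-byte
 * blocks where blocks 0 and 3 are already uptodate, a read of the whole
 * page (*pos at the page start, length == 4096) is trimmed at both ends:
 * the first loop advances *pos/poff past block 0, and the second loop
 * drops block 3 from plen, so the caller only reads bytes 1024-3071.
 */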

static void
iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned long flags;

	spin_lock_irqsave(&iop->uptodate_lock, flags);
	bitmap_set(iop->uptodate, first, last - first + 1);
	if (bitmap_full(iop->uptodate, i_blocks_per_page(inode, page)))
		SetPageUptodate(page);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}

static void
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	if (PageError(page))
		return;

	if (page_has_private(page))
		iomap_iop_set_range_uptodate(page, off, len);
	else
		SetPageUptodate(page);
}

static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct iomap_page *iop = to_iomap_page(page);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
	}

	if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
		unlock_page(page);
}

static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		iomap_read_page_end_io(bvec, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct page		*cur_page;
	bool			cur_page_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

static void
iomap_read_inline_data(struct inode *inode, struct page *page,
		struct iomap *iomap)
{
	size_t size = i_size_read(inode);
	void *addr;

	if (PageUptodate(page))
		return;

	BUG_ON(page->index);
	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - size);
	kunmap_atomic(addr);
	SetPageUptodate(page);
}

static inline bool iomap_block_needs_zeroing(struct inode *inode,
		struct iomap *iomap, loff_t pos)
{
	return iomap->type != IOMAP_MAPPED ||
		(iomap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(inode);
}

static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap, struct iomap *srcmap)
{
	struct iomap_readpage_ctx *ctx = data;
	struct page *page = ctx->cur_page;
	struct iomap_page *iop = iomap_page_create(inode, page);
	bool same_page = false, is_contig = false;
	loff_t orig_pos = pos;
	unsigned poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE) {
		WARN_ON_ONCE(pos);
		iomap_read_inline_data(inode, page, iomap);
		return PAGE_SIZE;
	}

	/* zero post-eof blocks as the page may be mapped */
	iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(inode, iomap, pos)) {
		zero_user(page, poff, plen);
		iomap_set_range_uptodate(page, poff, plen);
		goto done;
	}

	ctx->cur_page_in_bio = true;
	if (iop)
		atomic_add(plen, &iop->read_bytes_pending);

	/* Try to merge into a previous segment if we can */
	sector = iomap_sector(iomap, pos);
	if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
		if (__bio_try_merge_page(ctx->bio, page, plen, poff,
				&same_page))
			goto done;
		is_contig = true;
	}

	if (!is_contig || bio_full(ctx->bio, plen)) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, bio_max_segs(nr_vecs));
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_readpage does.
		 */
		if (!ctx->bio)
			ctx->bio = bio_alloc(orig_gfp, 1);
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
	}

	bio_add_page(ctx->bio, page, plen, poff);
done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = { .cur_page = page };
	struct inode *inode = page->mapping->host;
	unsigned poff;
	loff_t ret;

	trace_iomap_readpage(page->mapping->host, 1);

	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
		ret = iomap_apply(inode, page_offset(page) + poff,
				PAGE_SIZE - poff, 0, ops, &ctx,
				iomap_readpage_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			SetPageError(page);
			break;
		}
	}

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readahead and block_read_full_page we always
	 * return 0 and just mark the page as PageError on errors.  This
	 * should be cleaned up all through the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);
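
/*
 * Example usage (sketch under assumed names; "myfs" and "myfs_iomap_ops"
 * are hypothetical): a filesystem implements its ->readpage
 * address_space operation by handing the page and its iomap_ops to
 * iomap_readpage():
 *
 *	static int myfs_readpage(struct file *unused, struct page *page)
 *	{
 *		return iomap_readpage(page, &myfs_iomap_ops);
 *	}
 */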
352
Darrick J. Wongafc51aa2019-07-15 08:50:59 -0700353static loff_t
Matthew Wilcox (Oracle)9d24a132020-06-01 21:47:34 -0700354iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length,
Goldwyn Rodriguesc039b992019-10-18 16:44:10 -0700355 void *data, struct iomap *iomap, struct iomap *srcmap)
Darrick J. Wongafc51aa2019-07-15 08:50:59 -0700356{
357 struct iomap_readpage_ctx *ctx = data;
358 loff_t done, ret;
359
360 for (done = 0; done < length; done += ret) {
361 if (ctx->cur_page && offset_in_page(pos + done) == 0) {
362 if (!ctx->cur_page_in_bio)
363 unlock_page(ctx->cur_page);
364 put_page(ctx->cur_page);
365 ctx->cur_page = NULL;
366 }
367 if (!ctx->cur_page) {
Matthew Wilcox (Oracle)9d24a132020-06-01 21:47:34 -0700368 ctx->cur_page = readahead_page(ctx->rac);
Darrick J. Wongafc51aa2019-07-15 08:50:59 -0700369 ctx->cur_page_in_bio = false;
370 }
371 ret = iomap_readpage_actor(inode, pos + done, length - done,
Goldwyn Rodriguesc039b992019-10-18 16:44:10 -0700372 ctx, iomap, srcmap);
Darrick J. Wongafc51aa2019-07-15 08:50:59 -0700373 }
374
375 return done;
376}
377
Matthew Wilcox (Oracle)9d24a132020-06-01 21:47:34 -0700378/**
379 * iomap_readahead - Attempt to read pages from a file.
380 * @rac: Describes the pages to be read.
381 * @ops: The operations vector for the filesystem.
382 *
383 * This function is for filesystems to call to implement their readahead
384 * address_space operation.
385 *
386 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
387 * blocks from disc), and may wait for it. The caller may be trying to
388 * access a different page, and so sleeping excessively should be avoided.
389 * It may allocate memory, but should avoid costly allocations. This
390 * function is called with memalloc_nofs set, so allocations will not cause
391 * the filesystem to be reentered.
392 */
393void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
Darrick J. Wongafc51aa2019-07-15 08:50:59 -0700394{
Matthew Wilcox (Oracle)9d24a132020-06-01 21:47:34 -0700395 struct inode *inode = rac->mapping->host;
396 loff_t pos = readahead_pos(rac);
397 loff_t length = readahead_length(rac);
Darrick J. Wongafc51aa2019-07-15 08:50:59 -0700398 struct iomap_readpage_ctx ctx = {
Matthew Wilcox (Oracle)9d24a132020-06-01 21:47:34 -0700399 .rac = rac,
Darrick J. Wongafc51aa2019-07-15 08:50:59 -0700400 };
Darrick J. Wongafc51aa2019-07-15 08:50:59 -0700401
Matthew Wilcox (Oracle)9d24a132020-06-01 21:47:34 -0700402 trace_iomap_readahead(inode, readahead_count(rac));
Christoph Hellwig9e91c572019-10-17 13:12:13 -0700403
Darrick J. Wongafc51aa2019-07-15 08:50:59 -0700404 while (length > 0) {
Matthew Wilcox (Oracle)9d24a132020-06-01 21:47:34 -0700405 loff_t ret = iomap_apply(inode, pos, length, 0, ops,
406 &ctx, iomap_readahead_actor);
Darrick J. Wongafc51aa2019-07-15 08:50:59 -0700407 if (ret <= 0) {
408 WARN_ON_ONCE(ret == 0);
Matthew Wilcox (Oracle)9d24a132020-06-01 21:47:34 -0700409 break;
Darrick J. Wongafc51aa2019-07-15 08:50:59 -0700410 }
411 pos += ret;
412 length -= ret;
413 }
Matthew Wilcox (Oracle)9d24a132020-06-01 21:47:34 -0700414
Darrick J. Wongafc51aa2019-07-15 08:50:59 -0700415 if (ctx.bio)
416 submit_bio(ctx.bio);
417 if (ctx.cur_page) {
418 if (!ctx.cur_page_in_bio)
419 unlock_page(ctx.cur_page);
420 put_page(ctx.cur_page);
421 }
Darrick J. Wongafc51aa2019-07-15 08:50:59 -0700422}
Matthew Wilcox (Oracle)9d24a132020-06-01 21:47:34 -0700423EXPORT_SYMBOL_GPL(iomap_readahead);
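
/*
 * Example usage (sketch, same hypothetical "myfs" names as above): the
 * matching ->readahead hook is a one-liner:
 *
 *	static void myfs_readahead(struct readahead_control *rac)
 *	{
 *		iomap_readahead(rac, &myfs_iomap_ops);
 *	}
 */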

/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to a file portion
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	trace_iomap_releasepage(page->mapping->host, page_offset(page),
			PAGE_SIZE);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared.  Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(); skip those here.
	 */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	iomap_page_release(page);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
	trace_iomap_invalidatepage(page->mapping->host, offset, len);

	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == PAGE_SIZE) {
		WARN_ON_ONCE(PageWriteback(page));
		cancel_dirty_page(page);
		iomap_page_release(page);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page))
		attach_page_private(newpage, detach_page_private(page));

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */
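
/*
 * Example usage (sketch under assumed names; "myfs_aops" and the read
 * hooks are hypothetical): the page-cache housekeeping helpers above are
 * normally plugged straight into the filesystem's
 * address_space_operations; the iomap header defines iomap_migrate_page
 * as NULL when CONFIG_MIGRATION is not set, so the assignment is
 * unconditional:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readpage		= myfs_readpage,
 *		.readahead		= myfs_readahead,
 *		.is_partially_uptodate	= iomap_is_partially_uptodate,
 *		.releasepage		= iomap_releasepage,
 *		.invalidatepage		= iomap_invalidatepage,
 *		.migratepage		= iomap_migrate_page,
 *	};
 */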

enum {
	IOMAP_WRITE_F_UNSHARE		= (1 << 0),
};

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
		unsigned plen, struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}

static int
__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
		struct page *page, struct iomap *srcmap)
{
	struct iomap_page *iop = iomap_page_create(inode, page);
	loff_t block_size = i_blocksize(inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	unsigned from = offset_in_page(pos), to = from + len, poff, plen;

	if (PageUptodate(page))
		return 0;
	ClearPageError(page);

	do {
		iomap_adjust_read_range(inode, iop, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(flags & IOMAP_WRITE_F_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(inode, srcmap, block_start)) {
			if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE))
				return -EIO;
			zero_user_segments(page, poff, from, to, poff + plen);
		} else {
			int status = iomap_read_page_sync(block_start, page,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(page, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap, struct iomap *srcmap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);
	if (srcmap != iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(inode, pos, len, iomap);
		if (status)
			return status;
	}

	page = grab_cache_page_write_begin(inode->i_mapping, pos >> PAGE_SHIFT,
			AOP_FLAG_NOFS);
	if (!page) {
		status = -ENOMEM;
		goto out_no_page;
	}

	if (srcmap->type == IOMAP_INLINE)
		iomap_read_inline_data(inode, page, srcmap);
	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(page, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(inode, pos, len, flags, page,
				srcmap);

	if (unlikely(status))
		goto out_unlock;

	*pagep = page;
	return 0;

out_unlock:
	unlock_page(page);
	put_page(page);
	iomap_write_failed(inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, 0, NULL, iomap);
	return status;
}

int
iomap_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/*
	 * Lock out page's memcg migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 0);
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
EXPORT_SYMBOL_GPL(iomap_set_page_dirty);

static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct page *page)
{
	flush_dcache_page(page);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write.  However if we have encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing, and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !PageUptodate(page)))
		return 0;
	iomap_set_range_uptodate(page, offset_in_page(pos), len);
	iomap_set_page_dirty(page);
	return copied;
}

static size_t iomap_write_end_inline(struct inode *inode, struct page *page,
		struct iomap *iomap, loff_t pos, size_t copied)
{
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));

	flush_dcache_page(page);
	addr = kmap_atomic(page);
	memcpy(iomap->inline_data + pos, addr + pos, copied);
	kunmap_atomic(addr);

	mark_inode_dirty(inode);
	return copied;
}

/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
static size_t iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct page *page, struct iomap *iomap,
		struct iomap *srcmap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	loff_t old_size = inode->i_size;
	size_t ret;

	if (srcmap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
				page, NULL);
	} else {
		ret = __iomap_write_end(inode, pos, len, copied, page);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(inode, pos + ret);
		iomap->flags |= IOMAP_F_SIZE_CHANGED;
	}
	unlock_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, ret, page, iomap);
	put_page(page);

	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap, struct iomap *srcmap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap,
				srcmap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = copy_page_from_iter_atomic(page, offset, bytes, i);

		status = iomap_write_end(inode, pos, bytes, copied, page, iomap,
				srcmap);

		if (unlikely(copied != status))
			iov_iter_revert(i, copied - status);

		cond_resched();
		if (unlikely(status == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (copied)
				bytes = copied;
			goto again;
		}
		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
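
/*
 * Example usage (sketch under assumed names; "myfs" is hypothetical): a
 * filesystem's ->write_iter typically takes the inode lock, runs the
 * generic write checks, lets iomap drive the page cache copy, and then
 * advances ki_pos itself, since this helper only returns the byte count:
 *
 *	static ssize_t myfs_file_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&myfs_iomap_ops);
 *		inode_unlock(inode);
 *		if (ret > 0) {
 *			iocb->ki_pos += ret;
 *			ret = generic_write_sync(iocb, ret);
 *		}
 *		return ret;
 *	}
 */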

static loff_t
iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap, struct iomap *srcmap)
{
	long status = 0;
	loff_t written = 0;

	/* don't bother with blocks that are not shared to start with */
	if (!(iomap->flags & IOMAP_F_SHARED))
		return length;
	/* don't bother with holes or unwritten extents */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		unsigned long offset = offset_in_page(pos);
		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
		struct page *page;

		status = iomap_write_begin(inode, pos, bytes,
				IOMAP_WRITE_F_UNSHARE, &page, iomap, srcmap);
		if (unlikely(status))
			return status;

		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap,
				srcmap);
		if (WARN_ON_ONCE(status == 0))
			return -EIO;

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_unshare_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);

static s64 iomap_zero(struct inode *inode, loff_t pos, u64 length,
		struct iomap *iomap, struct iomap *srcmap)
{
	struct page *page;
	int status;
	unsigned offset = offset_in_page(pos);
	unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);

	status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap);
}

static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos,
		loff_t length, void *data, struct iomap *iomap,
		struct iomap *srcmap)
{
	bool *did_zero = data;
	loff_t written = 0;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		s64 bytes;

		if (IS_DAX(inode))
			bytes = dax_iomap_zero(pos, length, iomap);
		else
			bytes = iomap_zero(inode, pos, length, iomap, srcmap);
		if (bytes < 0)
			return bytes;

		pos += bytes;
		length -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (length > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
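
/*
 * Example usage (sketch; "myfs_iomap_ops" and "newsize" are hypothetical):
 * when shrinking a file, a filesystem zeroes the tail of the new last
 * block before publishing the new size, so that mmap reads past EOF see
 * zeroes:
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero,
 *			&myfs_iomap_ops);
 *	if (error)
 *		return error;
 *	truncate_setsize(inode, newsize);
 */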

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap, struct iomap *srcmap)
{
	struct page *page = data;
	int ret;

	if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(page, pos, length, NULL, iomap);
		if (ret)
			return ret;
		block_commit_write(page, 0, length);
	} else {
		WARN_ON_ONCE(!PageUptodate(page));
		iomap_page_create(inode, page);
		set_page_dirty(page);
	}

	return length;
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset;
	ssize_t ret;

	lock_page(page);
	ret = page_mkwrite_check_truncate(page, inode);
	if (ret < 0)
		goto out_unlock;
	length = ret;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
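
/*
 * Example usage (sketch under assumed names; "myfs" is hypothetical): a
 * filesystem exposes this through its vm_operations_struct, bracketing
 * the fault with the pagefault freeze protection:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		vm_fault_t ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vmf->vma->vm_file);
 *		ret = iomap_page_mkwrite(vmf, &myfs_iomap_ops);
 *		sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 */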

static void
iomap_finish_page_writeback(struct inode *inode, struct page *page,
		int error, unsigned int len)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (error) {
		SetPageError(page);
		mapping_set_error(inode->i_mapping, -EIO);
	}

	WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);

	if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
		end_page_writeback(page);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
static void
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
	struct inode *inode = ioend->io_inode;
	struct bio *bio = &ioend->io_inline_bio;
	struct bio *last = ioend->io_bio, *next;
	u64 start = bio->bi_iter.bi_sector;
	loff_t offset = ioend->io_offset;
	bool quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bv, bio, iter_all)
			iomap_finish_page_writeback(inode, bv->bv_page, error,
					bv->bv_len);
		bio_put(bio);
	}
	/* The ioend has been freed by bio_put() */

	if (unlikely(error && !quiet)) {
		printk_ratelimited(KERN_ERR
"%s: writeback error on inode %lu, offset %lld, sector %llu",
			inode->i_sb->s_id, inode->i_ino, offset, start);
	}
}

void
iomap_finish_ioends(struct iomap_ioend *ioend, int error)
{
	struct list_head tmp;

	list_replace_init(&ioend->io_list, &tmp);
	iomap_finish_ioend(ioend, error);

	while (!list_empty(&tmp)) {
		ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
		list_del_init(&ioend->io_list);
		iomap_finish_ioend(ioend, error);
	}
}
EXPORT_SYMBOL_GPL(iomap_finish_ioends);

/*
 * We can merge two adjacent ioends if they have the same set of work to do.
 */
static bool
iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
{
	if (ioend->io_bio->bi_status != next->io_bio->bi_status)
		return false;
	if ((ioend->io_flags & IOMAP_F_SHARED) ^
	    (next->io_flags & IOMAP_F_SHARED))
		return false;
	if ((ioend->io_type == IOMAP_UNWRITTEN) ^
	    (next->io_type == IOMAP_UNWRITTEN))
		return false;
	if (ioend->io_offset + ioend->io_size != next->io_offset)
		return false;
	return true;
}

void
iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
{
	struct iomap_ioend *next;

	INIT_LIST_HEAD(&ioend->io_list);

	while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
			io_list))) {
		if (!iomap_ioend_can_merge(ioend, next))
			break;
		list_move_tail(&next->io_list, &ioend->io_list);
		ioend->io_size += next->io_size;
	}
}
EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);

static int
iomap_ioend_compare(void *priv, const struct list_head *a,
		const struct list_head *b)
{
	struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
	struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);

	if (ia->io_offset < ib->io_offset)
		return -1;
	if (ia->io_offset > ib->io_offset)
		return 1;
	return 0;
}

void
iomap_sort_ioends(struct list_head *ioend_list)
{
	list_sort(NULL, ioend_list, iomap_ioend_compare);
}
EXPORT_SYMBOL_GPL(iomap_sort_ioends);
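
/*
 * Example usage (sketch, hypothetical names throughout): a filesystem's
 * ioend completion worker typically splices its pending ioends onto a
 * private list under its own lock, sorts them, and lets each pass through
 * iomap_ioend_try_merge() before finishing it:
 *
 *	struct iomap_ioend *ioend;
 *	LIST_HEAD(tmp);
 *
 *	list_splice_init(&pending_ioends, &tmp);	(under the fs lock)
 *	iomap_sort_ioends(&tmp);
 *	while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
 *			io_list))) {
 *		list_del_init(&ioend->io_list);
 *		iomap_ioend_try_merge(ioend, &tmp);
 *		iomap_finish_ioends(ioend,
 *			blk_status_to_errno(ioend->io_bio->bi_status));
 *	}
 */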

static void iomap_writepage_end_bio(struct bio *bio)
{
	struct iomap_ioend *ioend = bio->bi_private;

	iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

/*
 * Submit the final bio for an ioend.
 *
 * If @error is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them.  In this situation, we need to fail the bio instead of
 * submitting it.  This typically only happens on a filesystem shutdown.
 */
static int
iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
		int error)
{
	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = iomap_writepage_end_bio;

	if (wpc->ops->prepare_ioend)
		error = wpc->ops->prepare_ioend(ioend, error);
	if (error) {
		/*
		 * If we are failing the IO now, just mark the ioend with an
		 * error and finish it.  This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
		 */
		ioend->io_bio->bi_status = errno_to_blk_status(error);
		bio_endio(ioend->io_bio);
		return error;
	}

	submit_bio(ioend->io_bio);
	return 0;
}

static struct iomap_ioend *
iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
		loff_t offset, sector_t sector, struct writeback_control *wbc)
{
	struct iomap_ioend *ioend;
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &iomap_ioend_bioset);
	bio_set_dev(bio, wpc->iomap.bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	bio->bi_write_hint = inode->i_write_hint;
	wbc_init_bio(wbc, bio);

	ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = wpc->iomap.type;
	ioend->io_flags = wpc->iomap.flags;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in iomap_finish_ioend().
 */
static struct bio *
iomap_chain_bio(struct bio *prev)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
	bio_copy_dev(new, prev);/* also copies over blkcg information */
	new->bi_iter.bi_sector = bio_end_sector(prev);
	new->bi_opf = prev->bi_opf;
	new->bi_write_hint = prev->bi_write_hint;

	bio_chain(prev, new);
	bio_get(prev);		/* for iomap_finish_ioend */
	submit_bio(prev);
	return new;
}

static bool
iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
		sector_t sector)
{
	if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
	    (wpc->ioend->io_flags & IOMAP_F_SHARED))
		return false;
	if (wpc->iomap.type != wpc->ioend->io_type)
		return false;
	if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
		return false;
	if (sector != bio_end_sector(wpc->ioend->io_bio))
		return false;
	return true;
}

/*
 * Test to see if we have an existing ioend structure that we could append to
 * first, otherwise finish off the current ioend and start another.
 */
static void
iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
		struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct list_head *iolist)
{
	sector_t sector = iomap_sector(&wpc->iomap, offset);
	unsigned len = i_blocksize(inode);
	unsigned poff = offset & (PAGE_SIZE - 1);
	bool merged, same_page = false;

	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
	}

	merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
			&same_page);
	if (iop)
		atomic_add(len, &iop->write_bytes_pending);

	if (!merged) {
		if (bio_full(wpc->ioend->io_bio, len)) {
			wpc->ioend->io_bio =
				iomap_chain_bio(wpc->ioend->io_bio);
		}
		bio_add_page(wpc->ioend->io_bio, page, len, poff);
	}

	wpc->ioend->io_size += len;
	wbc_account_cgroup_owner(wbc, page, len);
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding blocks to is cached on the writepage context, and if the new block
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected.  While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
iomap_writepage_map(struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct inode *inode,
		struct page *page, u64 end_offset)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct iomap_ioend *ioend, *next;
	unsigned len = i_blocksize(inode);
	u64 file_offset; /* file offset of page */
	int error = 0, count = 0, i;
	LIST_HEAD(submit_list);

	WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);

	/*
	 * Walk through the page to find areas to write back. If we run off the
	 * end of the current map or find the current map invalid, grab a new
	 * one.
	 */
	for (i = 0, file_offset = page_offset(page);
	     i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
	     i++, file_offset += len) {
		if (iop && !test_bit(i, iop->uptodate))
			continue;

		error = wpc->ops->map_blocks(wpc, inode, file_offset);
		if (error)
			break;
		if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
			continue;
		if (wpc->iomap.type == IOMAP_HOLE)
			continue;
		iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
				&submit_list);
		count++;
	}

	WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
	WARN_ON_ONCE(!PageLocked(page));
	WARN_ON_ONCE(PageWriteback(page));
	WARN_ON_ONCE(PageDirty(page));

	/*
	 * We cannot cancel the ioend directly here on error.  We may have
	 * already set other pages under writeback and hence we have to run I/O
	 * completion to mark the error state of the pages under writeback
	 * appropriately.
	 */
	if (unlikely(error)) {
		/*
		 * Let the filesystem know what portion of the current page
		 * failed to map. If the page wasn't added to the ioend, it
		 * won't be affected by I/O completion and we must unlock it
		 * now.
		 */
		if (wpc->ops->discard_page)
			wpc->ops->discard_page(page, file_offset);
		if (!count) {
			ClearPageUptodate(page);
			unlock_page(page);
			goto done;
		}
	}

	set_page_writeback(page);
	unlock_page(page);

	/*
	 * Preserve the original error if there was one, otherwise catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = iomap_submit_ioend(wpc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we race
	 * with a partial page truncate on a sub-page block sized filesystem.
	 */
	if (!count)
		end_page_writeback(page);
done:
	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 */
static int
iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
{
	struct iomap_writepage_ctx *wpc = data;
	struct inode *inode = page->mapping->host;
	pgoff_t end_index;
	u64 end_offset;
	loff_t offset;

	trace_iomap_writepage(inode, page_offset(page), PAGE_SIZE);

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		       | <EOF>   |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N  | Beyond  |
		 * ^--------------------------------^----------|----------
		 * |				    | Straddles |
		 * ---------------------------------^----------|---------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * iomap_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and be written out repeatedly which would result in an
		 * infinite loop; the user program that performs this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return iomap_writepage_map(wpc, wbc, inode, page, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

int
iomap_writepage(struct page *page, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	int ret;

	wpc->ops = ops;
	ret = iomap_do_writepage(page, wbc, wpc);
	if (!wpc->ioend)
		return ret;
	return iomap_submit_ioend(wpc, wpc->ioend, ret);
}
EXPORT_SYMBOL_GPL(iomap_writepage);

int
iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	int ret;

	wpc->ops = ops;
	ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
	if (!wpc->ioend)
		return ret;
	return iomap_submit_ioend(wpc, wpc->ioend, ret);
}
EXPORT_SYMBOL_GPL(iomap_writepages);
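
/*
 * Example usage (sketch under assumed names; "myfs" and myfs_map_blocks
 * are hypothetical): a filesystem supplies its block-mapping callbacks
 * through iomap_writeback_ops and a writepage context on the stack;
 * filesystems often embed iomap_writepage_ctx in a larger context
 * structure of their own:
 *
 *	static const struct iomap_writeback_ops myfs_writeback_ops = {
 *		.map_blocks	= myfs_map_blocks,
 *	};
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = { };
 *
 *		return iomap_writepages(mapping, wbc, &wpc,
 *				&myfs_writeback_ops);
 *	}
 */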

static int __init iomap_init(void)
{
	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
			offsetof(struct iomap_ioend, io_inline_bio),
			BIOSET_NEED_BVECS);
}
fs_initcall(iomap_init);