// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>

#include "../internal.h"

static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (iop || i_blocksize(inode) == PAGE_SIZE)
		return iop;

	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&iop->read_count, 0);
	atomic_set(&iop->write_count, 0);
	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);

	/*
	 * migrate_page_move_mapping() assumes that pages with private data have
	 * their count elevated by 1.
	 */
	get_page(page);
	set_page_private(page, (unsigned long)iop);
	SetPagePrivate(page);
	return iop;
}
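
/*
 * Worked example for iomap_page_create() above (illustrative only): on a
 * filesystem with 1024-byte blocks and a 4096-byte PAGE_SIZE, each page
 * covers four blocks, so bits 0-3 of iop->uptodate track which of those
 * blocks hold valid data.  The page itself is only marked PageUptodate
 * once every covered block has its bit set (see iomap_set_range_uptodate()
 * below).  When the block size equals PAGE_SIZE no iomap_page is allocated
 * at all and the page flags alone are sufficient.
 */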

static void
iomap_page_release(struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_count));
	WARN_ON_ONCE(atomic_read(&iop->write_count));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
	kfree(iop);
}

61 * Calculate the range inside the page that we actually need to read.
62 */
63static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	unsigned poff = offset_in_page(*pos);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_page(isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}
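
/*
 * Worked example for iomap_adjust_read_range() (illustrative numbers, not
 * taken from the code above): with 1024-byte blocks on a 4096-byte page, a
 * read at *pos = 5632 for length = 6144 starts at poff = 1536 within the
 * second page, plen = min(4096 - 1536, 6144) = 2560, covering blocks 1
 * through 3 of that page.  If block 1 is already uptodate, the leading loop
 * advances *pos to 6656, poff to 2560 and shrinks plen to 1536, so only
 * blocks 2 and 3 are actually read from disk.
 */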

static void
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned int i;
	bool uptodate = true;

	if (iop) {
		for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
			if (i >= first && i <= last)
				set_bit(i, iop->uptodate);
			else if (!test_bit(i, iop->uptodate))
				uptodate = false;
		}
	}

	if (uptodate && !PageError(page))
		SetPageUptodate(page);
}

/*
 * Drop one read reference on the page and unlock it once the last
 * outstanding read for it has completed (or immediately if there is no
 * iomap_page, i.e. the block size equals PAGE_SIZE).
 */
static void
iomap_read_finish(struct iomap_page *iop, struct page *page)
{
	if (!iop || atomic_dec_and_test(&iop->read_count))
		unlock_page(page);
}

static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct iomap_page *iop = to_iomap_page(page);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
	}

	iomap_read_finish(iop, page);
}

static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		iomap_read_page_end_io(bvec, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct page *cur_page;
	bool cur_page_in_bio;
	bool is_readahead;
	struct bio *bio;
	struct list_head *pages;
};

static void
iomap_read_inline_data(struct inode *inode, struct page *page,
		struct iomap *iomap)
{
	size_t size = i_size_read(inode);
	void *addr;

	if (PageUptodate(page))
		return;

	BUG_ON(page->index);
	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - size);
	kunmap_atomic(addr);
	SetPageUptodate(page);
}

static inline bool iomap_block_needs_zeroing(struct inode *inode,
		struct iomap *iomap, loff_t pos)
{
	return iomap->type != IOMAP_MAPPED ||
		(iomap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(inode);
}

static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	struct page *page = ctx->cur_page;
	struct iomap_page *iop = iomap_page_create(inode, page);
	bool same_page = false, is_contig = false;
	loff_t orig_pos = pos;
	unsigned poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE) {
		WARN_ON_ONCE(pos);
		iomap_read_inline_data(inode, page, iomap);
		return PAGE_SIZE;
	}

	/* zero post-eof blocks as the page may be mapped */
	iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(inode, iomap, pos)) {
		zero_user(page, poff, plen);
		iomap_set_range_uptodate(page, poff, plen);
		goto done;
	}

	ctx->cur_page_in_bio = true;

	/*
	 * Try to merge into a previous segment if we can.
	 */
	sector = iomap_sector(iomap, pos);
	if (ctx->bio && bio_end_sector(ctx->bio) == sector)
		is_contig = true;

	if (is_contig &&
	    __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
		if (!same_page && iop)
			atomic_inc(&iop->read_count);
		goto done;
	}

	/*
	 * If we start a new segment we need to increase the read count, and we
	 * need to do so before submitting any previous full bio to make sure
	 * that we don't prematurely unlock the page.
	 */
	if (iop)
		atomic_inc(&iop->read_count);

	if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->is_readahead) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->is_readahead)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
	}

	bio_add_page(ctx->bio, page, plen, poff);
done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = { .cur_page = page };
	struct inode *inode = page->mapping->host;
	unsigned poff;
	loff_t ret;

	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
		ret = iomap_apply(inode, page_offset(page) + poff,
				PAGE_SIZE - poff, 0, ops, &ctx,
				iomap_readpage_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			SetPageError(page);
			break;
		}
	}

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readpages and block_read_full_page we always
	 * return 0 and just mark the page as PageError on errors. This
	 * should be cleaned up all through the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);
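
/*
 * Usage sketch (hypothetical filesystem "foofs", not taken from this file):
 * a filesystem using the iomap read path would typically wire this helper
 * into its address_space_operations, e.g.:
 *
 *	static int foofs_readpage(struct file *file, struct page *page)
 *	{
 *		return iomap_readpage(page, &foofs_iomap_ops);
 *	}
 *
 * where foofs_iomap_ops provides the ->iomap_begin/->iomap_end callbacks
 * that translate file offsets into extent mappings.
 */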

static struct page *
iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
		loff_t length, loff_t *done)
{
	while (!list_empty(pages)) {
		struct page *page = lru_to_page(pages);

		if (page_offset(page) >= (u64)pos + length)
			break;

		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
				GFP_NOFS))
			return page;

		/*
		 * If we already have a page in the page cache at index we are
		 * done. Upper layers don't care if it is uptodate after the
		 * readpages call itself as every page gets checked again once
		 * actually needed.
		 */
		*done += PAGE_SIZE;
		put_page(page);
	}

	return NULL;
}

static loff_t
iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_page && offset_in_page(pos + done) == 0) {
			if (!ctx->cur_page_in_bio)
				unlock_page(ctx->cur_page);
			put_page(ctx->cur_page);
			ctx->cur_page = NULL;
		}
		if (!ctx->cur_page) {
			ctx->cur_page = iomap_next_page(inode, ctx->pages,
					pos, length, &done);
			if (!ctx->cur_page)
				break;
			ctx->cur_page_in_bio = false;
		}
		ret = iomap_readpage_actor(inode, pos + done, length - done,
				ctx, iomap);
	}

	return done;
}

int
iomap_readpages(struct address_space *mapping, struct list_head *pages,
		unsigned nr_pages, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = {
		.pages = pages,
		.is_readahead = true,
	};
	loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
	loff_t last = page_offset(list_entry(pages->next, struct page, lru));
	loff_t length = last - pos + PAGE_SIZE, ret = 0;

	while (length > 0) {
		ret = iomap_apply(mapping->host, pos, length, 0, ops,
				&ctx, iomap_readpages_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			goto done;
		}
		pos += ret;
		length -= ret;
	}
	ret = 0;
done:
	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_page) {
		if (!ctx.cur_page_in_bio)
			unlock_page(ctx.cur_page);
		put_page(ctx.cur_page);
	}

	/*
	 * Check that we didn't lose a page due to the arcane calling
	 * conventions.
	 */
	WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_readpages);
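
/*
 * As with iomap_readpage() above, a filesystem would hook this up as its
 * ->readpages address_space operation (sketch with a hypothetical "foofs",
 * assuming the same foofs_iomap_ops):
 *
 *	static int foofs_readpages(struct file *file,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return iomap_readpages(mapping, pages, nr_pages,
 *				&foofs_iomap_ops);
 *	}
 */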

/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to a file portion
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(), skip those here.
	 */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	iomap_page_release(page);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == PAGE_SIZE) {
		WARN_ON_ONCE(PageWriteback(page));
		cancel_dirty_page(page);
		iomap_page_release(page);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page)) {
		ClearPagePrivate(page);
		get_page(newpage);
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
		put_page(page);
		SetPagePrivate(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
		unsigned poff, unsigned plen, unsigned from, unsigned to,
		struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	if (iomap_block_needs_zeroing(inode, iomap, block_start)) {
		zero_user_segments(page, poff, from, to, poff + plen);
		iomap_set_range_uptodate(page, poff, plen);
		return 0;
	}

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}

/*
 * Prepare the blocks covered by a buffered write: any block that the write
 * only partially covers is read in synchronously (or zeroed around the
 * write range if it is new, a hole, or beyond EOF) so that no stale data
 * survives in the page cache.
 */
static int
__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
		struct page *page, struct iomap *iomap)
{
	struct iomap_page *iop = iomap_page_create(inode, page);
	loff_t block_size = i_blocksize(inode);
	loff_t block_start = pos & ~(block_size - 1);
	loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
	int status = 0;

	if (PageUptodate(page))
		return 0;

	do {
		iomap_adjust_read_range(inode, iop, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if ((from > poff && from < poff + plen) ||
		    (to > poff && to < poff + plen)) {
			status = iomap_read_page_sync(inode, block_start, page,
					poff, plen, from, to, iomap);
			if (status)
				break;
		}

	} while ((block_start += plen) < block_end);

	return status;
}
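
/*
 * Worked example for __iomap_write_begin() (illustrative numbers): with
 * 1024-byte blocks, a write of 100 bytes at pos = 1600 gives from = 1600,
 * to = 1700, block_start = 1024 and block_end = 2048, i.e. the write
 * touches only block 1 of the page.  Since both 'from' and 'to' fall
 * strictly inside that block, iomap_read_page_sync() is called to read it
 * (or zero it, if the extent is new or a hole) before the user data is
 * copied in.  A fully block-aligned write would skip the read entirely.
 */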

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(inode, pos, len, iomap);
		if (status)
			return status;
	}

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page) {
		status = -ENOMEM;
		goto out_no_page;
	}

	if (iomap->type == IOMAP_INLINE)
		iomap_read_inline_data(inode, page, iomap);
	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(page, pos, len, NULL, iomap);
	else
		status = __iomap_write_begin(inode, pos, len, page, iomap);

	if (unlikely(status))
		goto out_unlock;

	*pagep = page;
	return 0;

out_unlock:
	unlock_page(page);
	put_page(page);
	iomap_write_failed(inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, 0, NULL, iomap);
	return status;
}

int
iomap_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 0);
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
EXPORT_SYMBOL_GPL(iomap_set_page_dirty);

static int
__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	flush_dcache_page(page);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write. However if we have encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing, and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller
	 * to redo the whole thing.
	 */
	if (unlikely(copied < len && !PageUptodate(page)))
		return 0;
	iomap_set_range_uptodate(page, offset_in_page(pos), len);
	iomap_set_page_dirty(page);
	return copied;
}

static int
iomap_write_end_inline(struct inode *inode, struct page *page,
		struct iomap *iomap, loff_t pos, unsigned copied)
{
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(iomap->inline_data + pos, addr + pos, copied);
	kunmap_atomic(addr);

	mark_inode_dirty(inode);
	return copied;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	loff_t old_size = inode->i_size;
	int ret;

	if (iomap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
	} else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
				page, NULL);
	} else {
		ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache. It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(inode, pos + ret);
		iomap->flags |= IOMAP_F_SIZE_CHANGED;
	}
	unlock_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, ret, page, iomap);
	put_page(page);

	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
				iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page,
				iomap);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
					iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
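
/*
 * Usage sketch (hypothetical filesystem "foofs", assuming the usual VFS
 * helpers): a filesystem's ->write_iter would normally take the inode lock,
 * run the generic write checks and then hand the iterator to this helper,
 * roughly:
 *
 *	inode_lock(inode);
 *	ret = generic_write_checks(iocb, from);
 *	if (ret > 0)
 *		ret = iomap_file_buffered_write(iocb, from, &foofs_iomap_ops);
 *	inode_unlock(inode);
 *	if (ret > 0) {
 *		iocb->ki_pos += ret;
 *		ret = generic_write_sync(iocb, ret);
 *	}
 *
 * Real filesystems also handle freeze protection, O_DSYNC policy and
 * fallback to direct I/O here; this is only the buffered-write core.
 */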

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
				AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
			iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
			iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed? we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
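
/*
 * Worked example for iomap_truncate_page() (illustrative numbers): with a
 * 4096-byte block size, truncating a file to pos = 10000 gives
 * off = 10000 & 4095 = 1808, so iomap_zero_range() is asked to zero the
 * remaining 4096 - 1808 = 2288 bytes of that block, i.e. offsets 10000
 * through 12287.  Truncating to an exact block boundary is a no-op here.
 */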

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(page, pos, length, NULL, iomap);
		if (ret)
			return ret;
		block_commit_write(page, 0, length);
	} else {
		WARN_ON_ONCE(!PageUptodate(page));
		iomap_page_create(inode, page);
		set_page_dirty(page);
	}

	return length;
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = offset_in_page(size);
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
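
/*
 * Usage sketch (hypothetical filesystem "foofs"): this helper is meant to
 * back the ->page_mkwrite handler in a filesystem's vm_operations_struct,
 * typically with write-fault serialisation around it, e.g.:
 *
 *	static vm_fault_t foofs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		vm_fault_t ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vmf->vma->vm_file);
 *		ret = iomap_page_mkwrite(vmf, &foofs_iomap_ops);
 *		sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 */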