blob: f15c705c5a93abe4466e7eec2b6281cc7f7ca776 [file] [log] [blame]
Christoph Hellwig73ce6ab2019-04-28 08:34:02 -07001// SPDX-License-Identifier: GPL-2.0
Christoph Hellwigae259a92016-06-21 09:23:11 +10002/*
3 * Copyright (C) 2010 Red Hat, Inc.
Christoph Hellwig72b4daa2018-06-19 15:10:57 -07004 * Copyright (c) 2016-2018 Christoph Hellwig.
Christoph Hellwigae259a92016-06-21 09:23:11 +10005 */
6#include <linux/module.h>
7#include <linux/compiler.h>
8#include <linux/fs.h>
9#include <linux/iomap.h>
10#include <linux/uaccess.h>
11#include <linux/gfp.h>
Christoph Hellwig9dc55f12018-07-11 22:26:05 -070012#include <linux/migrate.h>
Christoph Hellwigae259a92016-06-21 09:23:11 +100013#include <linux/mm.h>
Christoph Hellwig72b4daa2018-06-19 15:10:57 -070014#include <linux/mm_inline.h>
Christoph Hellwigae259a92016-06-21 09:23:11 +100015#include <linux/swap.h>
16#include <linux/pagemap.h>
Christoph Hellwig8a78cb12018-06-01 09:04:40 -070017#include <linux/pagevec.h>
Christoph Hellwigae259a92016-06-21 09:23:11 +100018#include <linux/file.h>
19#include <linux/uio.h>
20#include <linux/backing-dev.h>
21#include <linux/buffer_head.h>
Christoph Hellwigff6a9292016-11-30 14:36:01 +110022#include <linux/task_io_accounting_ops.h>
Christoph Hellwig9a286f02016-06-21 09:31:39 +100023#include <linux/dax.h>
Ingo Molnarf361bf42017-02-03 23:47:37 +010024#include <linux/sched/signal.h>
25
Christoph Hellwigae259a92016-06-21 09:23:11 +100026#include "internal.h"
27
Christoph Hellwigae259a92016-06-21 09:23:11 +100028/*
29 * Execute a iomap write on a segment of the mapping that spans a
30 * contiguous range of pages that have identical block mapping state.
31 *
32 * This avoids the need to map pages individually, do individual allocations
33 * for each page and most importantly avoid the need for filesystem specific
34 * locking per page. Instead, all the operations are amortised over the entire
35 * range of pages. It is assumed that the filesystems will lock whatever
36 * resources they require in the iomap_begin call, and release them in the
37 * iomap_end call.
38 */
Christoph Hellwigbefb5032016-09-19 11:24:49 +100039loff_t
Christoph Hellwigae259a92016-06-21 09:23:11 +100040iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -080041 const struct iomap_ops *ops, void *data, iomap_actor_t actor)
Christoph Hellwigae259a92016-06-21 09:23:11 +100042{
43 struct iomap iomap = { 0 };
44 loff_t written = 0, ret;
45
46 /*
47 * Need to map a range from start position for length bytes. This can
48 * span multiple pages - it is only guaranteed to return a range of a
49 * single type of pages (e.g. all into a hole, all mapped or all
50 * unwritten). Failure at this point has nothing to undo.
51 *
52 * If allocation is required for this range, reserve the space now so
53 * that the allocation is guaranteed to succeed later on. Once we copy
54 * the data into the page cache pages, then we cannot fail otherwise we
55 * expose transient stale data. If the reserve fails, we can safely
56 * back out at this point as there is nothing to undo.
57 */
58 ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
59 if (ret)
60 return ret;
61 if (WARN_ON(iomap.offset > pos))
62 return -EIO;
Darrick J. Wong0c6dda72018-01-26 11:11:20 -080063 if (WARN_ON(iomap.length == 0))
64 return -EIO;
Christoph Hellwigae259a92016-06-21 09:23:11 +100065
66 /*
67 * Cut down the length to the one actually provided by the filesystem,
68 * as it might not be able to give us the whole size that we requested.
69 */
70 if (iomap.offset + iomap.length < pos + length)
71 length = iomap.offset + iomap.length - pos;
72
73 /*
74 * Now that we have guaranteed that the space allocation will succeed.
75 * we can do the copy-in page by page without having to worry about
76 * failures exposing transient data.
77 */
78 written = actor(inode, pos, length, data, &iomap);
79
80 /*
81 * Now the data has been copied, commit the range we've copied. This
82 * should not fail unless the filesystem has had a fatal error.
83 */
Christoph Hellwigf20ac7a2016-08-17 08:42:34 +100084 if (ops->iomap_end) {
85 ret = ops->iomap_end(inode, pos, length,
86 written > 0 ? written : 0,
87 flags, &iomap);
88 }
Christoph Hellwigae259a92016-06-21 09:23:11 +100089
90 return written ? written : ret;
91}
92
Christoph Hellwig57fc5052018-06-01 09:03:08 -070093static sector_t
94iomap_sector(struct iomap *iomap, loff_t pos)
95{
96 return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
97}
98
Christoph Hellwig9dc55f12018-07-11 22:26:05 -070099static struct iomap_page *
100iomap_page_create(struct inode *inode, struct page *page)
101{
102 struct iomap_page *iop = to_iomap_page(page);
103
104 if (iop || i_blocksize(inode) == PAGE_SIZE)
105 return iop;
106
107 iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
108 atomic_set(&iop->read_count, 0);
109 atomic_set(&iop->write_count, 0);
110 bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
Piotr Jaroszynski8e47a452019-01-27 08:46:45 -0800111
112 /*
113 * migrate_page_move_mapping() assumes that pages with private data have
114 * their count elevated by 1.
115 */
116 get_page(page);
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700117 set_page_private(page, (unsigned long)iop);
118 SetPagePrivate(page);
119 return iop;
120}
121
122static void
123iomap_page_release(struct page *page)
124{
125 struct iomap_page *iop = to_iomap_page(page);
126
127 if (!iop)
128 return;
129 WARN_ON_ONCE(atomic_read(&iop->read_count));
130 WARN_ON_ONCE(atomic_read(&iop->write_count));
131 ClearPagePrivate(page);
132 set_page_private(page, 0);
Piotr Jaroszynski8e47a452019-01-27 08:46:45 -0800133 put_page(page);
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700134 kfree(iop);
135}
136
137/*
138 * Calculate the range inside the page that we actually need to read.
139 */
140static void
141iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
142 loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
143{
Dave Chinner8c110d42018-11-21 08:06:37 -0800144 loff_t orig_pos = *pos;
145 loff_t isize = i_size_read(inode);
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700146 unsigned block_bits = inode->i_blkbits;
147 unsigned block_size = (1 << block_bits);
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700148 unsigned poff = offset_in_page(*pos);
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700149 unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
150 unsigned first = poff >> block_bits;
151 unsigned last = (poff + plen - 1) >> block_bits;
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700152
153 /*
154 * If the block size is smaller than the page size we need to check the
155 * per-block uptodate status and adjust the offset and length if needed
156 * to avoid reading in already uptodate ranges.
157 */
158 if (iop) {
159 unsigned int i;
160
161 /* move forward for each leading block marked uptodate */
162 for (i = first; i <= last; i++) {
163 if (!test_bit(i, iop->uptodate))
164 break;
165 *pos += block_size;
166 poff += block_size;
167 plen -= block_size;
168 first++;
169 }
170
171 /* truncate len if we find any trailing uptodate block(s) */
172 for ( ; i <= last; i++) {
173 if (test_bit(i, iop->uptodate)) {
174 plen -= (last - i + 1) * block_size;
175 last = i - 1;
176 break;
177 }
178 }
179 }
180
181 /*
182 * If the extent spans the block that contains the i_size we need to
183 * handle both halves separately so that we properly zero data in the
184 * page cache for blocks that are entirely outside of i_size.
185 */
Dave Chinner8c110d42018-11-21 08:06:37 -0800186 if (orig_pos <= isize && orig_pos + length > isize) {
187 unsigned end = offset_in_page(isize - 1) >> block_bits;
188
189 if (first <= end && last > end)
190 plen -= (last - end) * block_size;
191 }
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700192
193 *offp = poff;
194 *lenp = plen;
195}
196
197static void
198iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
199{
200 struct iomap_page *iop = to_iomap_page(page);
201 struct inode *inode = page->mapping->host;
202 unsigned first = off >> inode->i_blkbits;
203 unsigned last = (off + len - 1) >> inode->i_blkbits;
204 unsigned int i;
205 bool uptodate = true;
206
207 if (iop) {
208 for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
209 if (i >= first && i <= last)
210 set_bit(i, iop->uptodate);
211 else if (!test_bit(i, iop->uptodate))
212 uptodate = false;
213 }
214 }
215
216 if (uptodate && !PageError(page))
217 SetPageUptodate(page);
218}
219
220static void
221iomap_read_finish(struct iomap_page *iop, struct page *page)
222{
223 if (!iop || atomic_dec_and_test(&iop->read_count))
224 unlock_page(page);
225}
226
227static void
228iomap_read_page_end_io(struct bio_vec *bvec, int error)
229{
230 struct page *page = bvec->bv_page;
231 struct iomap_page *iop = to_iomap_page(page);
232
233 if (unlikely(error)) {
234 ClearPageUptodate(page);
235 SetPageError(page);
236 } else {
237 iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
238 }
239
240 iomap_read_finish(iop, page);
241}
242
Christoph Hellwigae259a92016-06-21 09:23:11 +1000243static void
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700244iomap_read_end_io(struct bio *bio)
245{
246 int error = blk_status_to_errno(bio->bi_status);
247 struct bio_vec *bvec;
Ming Lei6dc4f102019-02-15 19:13:19 +0800248 struct bvec_iter_all iter_all;
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700249
Christoph Hellwig2b070cf2019-04-25 09:03:00 +0200250 bio_for_each_segment_all(bvec, bio, iter_all)
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700251 iomap_read_page_end_io(bvec, error);
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700252 bio_put(bio);
253}
254
255struct iomap_readpage_ctx {
256 struct page *cur_page;
257 bool cur_page_in_bio;
258 bool is_readahead;
259 struct bio *bio;
260 struct list_head *pages;
261};
262
Christoph Hellwigcbbf4c02019-05-01 20:16:40 -0700263static void
264iomap_read_inline_data(struct inode *inode, struct page *page,
265 struct iomap *iomap)
266{
267 size_t size = i_size_read(inode);
268 void *addr;
269
270 if (PageUptodate(page))
271 return;
272
273 BUG_ON(page->index);
274 BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
275
276 addr = kmap_atomic(page);
277 memcpy(addr, iomap->inline_data, size);
278 memset(addr + size, 0, PAGE_SIZE - size);
279 kunmap_atomic(addr);
280 SetPageUptodate(page);
281}
282
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700283static loff_t
284iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
285 struct iomap *iomap)
286{
287 struct iomap_readpage_ctx *ctx = data;
288 struct page *page = ctx->cur_page;
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700289 struct iomap_page *iop = iomap_page_create(inode, page);
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700290 bool is_contig = false;
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700291 loff_t orig_pos = pos;
292 unsigned poff, plen;
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700293 sector_t sector;
294
Andreas Gruenbacher806a1472018-07-03 09:07:47 -0700295 if (iomap->type == IOMAP_INLINE) {
Darrick J. Wong7d5e0492018-08-10 17:55:57 -0700296 WARN_ON_ONCE(pos);
Andreas Gruenbacher806a1472018-07-03 09:07:47 -0700297 iomap_read_inline_data(inode, page, iomap);
298 return PAGE_SIZE;
299 }
300
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700301 /* zero post-eof blocks as the page may be mapped */
302 iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
303 if (plen == 0)
304 goto done;
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700305
306 if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
307 zero_user(page, poff, plen);
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700308 iomap_set_range_uptodate(page, poff, plen);
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700309 goto done;
310 }
311
312 ctx->cur_page_in_bio = true;
313
314 /*
315 * Try to merge into a previous segment if we can.
316 */
317 sector = iomap_sector(iomap, pos);
318 if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
Ming Lei07173c32019-02-15 19:13:20 +0800319 if (__bio_try_merge_page(ctx->bio, page, plen, poff, true))
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700320 goto done;
321 is_contig = true;
322 }
323
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700324 /*
325 * If we start a new segment we need to increase the read count, and we
326 * need to do so before submitting any previous full bio to make sure
327 * that we don't prematurely unlock the page.
328 */
329 if (iop)
330 atomic_inc(&iop->read_count);
331
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700332 if (!ctx->bio || !is_contig || bio_full(ctx->bio)) {
333 gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
334 int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
335
336 if (ctx->bio)
337 submit_bio(ctx->bio);
338
339 if (ctx->is_readahead) /* same as readahead_gfp_mask */
340 gfp |= __GFP_NORETRY | __GFP_NOWARN;
341 ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
342 ctx->bio->bi_opf = REQ_OP_READ;
343 if (ctx->is_readahead)
344 ctx->bio->bi_opf |= REQ_RAHEAD;
345 ctx->bio->bi_iter.bi_sector = sector;
346 bio_set_dev(ctx->bio, iomap->bdev);
347 ctx->bio->bi_end_io = iomap_read_end_io;
348 }
349
Ming Lei07173c32019-02-15 19:13:20 +0800350 bio_add_page(ctx->bio, page, plen, poff);
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700351done:
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700352 /*
353 * Move the caller beyond our range so that it keeps making progress.
354 * For that we have to include any leading non-uptodate ranges, but
355 * we can skip trailing ones as they will be handled in the next
356 * iteration.
357 */
358 return pos - orig_pos + plen;
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700359}
360
361int
362iomap_readpage(struct page *page, const struct iomap_ops *ops)
363{
364 struct iomap_readpage_ctx ctx = { .cur_page = page };
365 struct inode *inode = page->mapping->host;
366 unsigned poff;
367 loff_t ret;
368
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700369 for (poff = 0; poff < PAGE_SIZE; poff += ret) {
370 ret = iomap_apply(inode, page_offset(page) + poff,
371 PAGE_SIZE - poff, 0, ops, &ctx,
372 iomap_readpage_actor);
373 if (ret <= 0) {
374 WARN_ON_ONCE(ret == 0);
375 SetPageError(page);
376 break;
377 }
378 }
379
380 if (ctx.bio) {
381 submit_bio(ctx.bio);
382 WARN_ON_ONCE(!ctx.cur_page_in_bio);
383 } else {
384 WARN_ON_ONCE(ctx.cur_page_in_bio);
385 unlock_page(page);
386 }
387
388 /*
389 * Just like mpage_readpages and block_read_full_page we always
390 * return 0 and just mark the page as PageError on errors. This
391 * should be cleaned up all through the stack eventually.
392 */
393 return 0;
394}
395EXPORT_SYMBOL_GPL(iomap_readpage);
396
397static struct page *
398iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
399 loff_t length, loff_t *done)
400{
401 while (!list_empty(pages)) {
402 struct page *page = lru_to_page(pages);
403
404 if (page_offset(page) >= (u64)pos + length)
405 break;
406
407 list_del(&page->lru);
408 if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
409 GFP_NOFS))
410 return page;
411
412 /*
413 * If we already have a page in the page cache at index we are
414 * done. Upper layers don't care if it is uptodate after the
415 * readpages call itself as every page gets checked again once
416 * actually needed.
417 */
418 *done += PAGE_SIZE;
419 put_page(page);
420 }
421
422 return NULL;
423}
424
425static loff_t
426iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
427 void *data, struct iomap *iomap)
428{
429 struct iomap_readpage_ctx *ctx = data;
430 loff_t done, ret;
431
432 for (done = 0; done < length; done += ret) {
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700433 if (ctx->cur_page && offset_in_page(pos + done) == 0) {
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700434 if (!ctx->cur_page_in_bio)
435 unlock_page(ctx->cur_page);
436 put_page(ctx->cur_page);
437 ctx->cur_page = NULL;
438 }
439 if (!ctx->cur_page) {
440 ctx->cur_page = iomap_next_page(inode, ctx->pages,
441 pos, length, &done);
442 if (!ctx->cur_page)
443 break;
444 ctx->cur_page_in_bio = false;
445 }
446 ret = iomap_readpage_actor(inode, pos + done, length - done,
447 ctx, iomap);
448 }
449
450 return done;
451}
452
453int
454iomap_readpages(struct address_space *mapping, struct list_head *pages,
455 unsigned nr_pages, const struct iomap_ops *ops)
456{
457 struct iomap_readpage_ctx ctx = {
458 .pages = pages,
459 .is_readahead = true,
460 };
461 loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
462 loff_t last = page_offset(list_entry(pages->next, struct page, lru));
463 loff_t length = last - pos + PAGE_SIZE, ret = 0;
464
465 while (length > 0) {
466 ret = iomap_apply(mapping->host, pos, length, 0, ops,
467 &ctx, iomap_readpages_actor);
468 if (ret <= 0) {
469 WARN_ON_ONCE(ret == 0);
470 goto done;
471 }
472 pos += ret;
473 length -= ret;
474 }
475 ret = 0;
476done:
477 if (ctx.bio)
478 submit_bio(ctx.bio);
479 if (ctx.cur_page) {
480 if (!ctx.cur_page_in_bio)
481 unlock_page(ctx.cur_page);
482 put_page(ctx.cur_page);
483 }
484
485 /*
486 * Check that we didn't lose a page due to the arcance calling
487 * conventions..
488 */
489 WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
490 return ret;
491}
492EXPORT_SYMBOL_GPL(iomap_readpages);
493
Eric Sandeen3cc31fa2018-12-21 08:42:50 -0800494/*
495 * iomap_is_partially_uptodate checks whether blocks within a page are
496 * uptodate or not.
497 *
498 * Returns true if all blocks which correspond to a file portion
499 * we want to read within the page are uptodate.
500 */
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700501int
502iomap_is_partially_uptodate(struct page *page, unsigned long from,
503 unsigned long count)
504{
505 struct iomap_page *iop = to_iomap_page(page);
506 struct inode *inode = page->mapping->host;
Eric Sandeen3cc31fa2018-12-21 08:42:50 -0800507 unsigned len, first, last;
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700508 unsigned i;
509
Eric Sandeen3cc31fa2018-12-21 08:42:50 -0800510 /* Limit range to one page */
511 len = min_t(unsigned, PAGE_SIZE - from, count);
512
513 /* First and last blocks in range within page */
514 first = from >> inode->i_blkbits;
515 last = (from + len - 1) >> inode->i_blkbits;
516
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700517 if (iop) {
518 for (i = first; i <= last; i++)
519 if (!test_bit(i, iop->uptodate))
520 return 0;
521 return 1;
522 }
523
524 return 0;
525}
526EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
527
528int
529iomap_releasepage(struct page *page, gfp_t gfp_mask)
530{
531 /*
532 * mm accommodates an old ext3 case where clean pages might not have had
533 * the dirty bit cleared. Thus, it can send actual dirty pages to
534 * ->releasepage() via shrink_active_list(), skip those here.
535 */
536 if (PageDirty(page) || PageWriteback(page))
537 return 0;
538 iomap_page_release(page);
539 return 1;
540}
541EXPORT_SYMBOL_GPL(iomap_releasepage);
542
543void
544iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
545{
546 /*
547 * If we are invalidating the entire page, clear the dirty state from it
548 * and release it to avoid unnecessary buildup of the LRU.
549 */
550 if (offset == 0 && len == PAGE_SIZE) {
551 WARN_ON_ONCE(PageWriteback(page));
552 cancel_dirty_page(page);
553 iomap_page_release(page);
554 }
555}
556EXPORT_SYMBOL_GPL(iomap_invalidatepage);
557
558#ifdef CONFIG_MIGRATION
559int
560iomap_migrate_page(struct address_space *mapping, struct page *newpage,
561 struct page *page, enum migrate_mode mode)
562{
563 int ret;
564
Jan Karaab41ee62018-12-28 00:39:20 -0800565 ret = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700566 if (ret != MIGRATEPAGE_SUCCESS)
567 return ret;
568
569 if (page_has_private(page)) {
570 ClearPagePrivate(page);
Piotr Jaroszynski8e47a452019-01-27 08:46:45 -0800571 get_page(newpage);
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700572 set_page_private(newpage, page_private(page));
573 set_page_private(page, 0);
Piotr Jaroszynski8e47a452019-01-27 08:46:45 -0800574 put_page(page);
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700575 SetPagePrivate(newpage);
576 }
577
578 if (mode != MIGRATE_SYNC_NO_COPY)
579 migrate_page_copy(newpage, page);
580 else
581 migrate_page_states(newpage, page);
582 return MIGRATEPAGE_SUCCESS;
583}
584EXPORT_SYMBOL_GPL(iomap_migrate_page);
585#endif /* CONFIG_MIGRATION */
586
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700587static void
Christoph Hellwigae259a92016-06-21 09:23:11 +1000588iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
589{
590 loff_t i_size = i_size_read(inode);
591
592 /*
593 * Only truncate newly allocated pages beyoned EOF, even if the
594 * write started inside the existing inode size.
595 */
596 if (pos + len > i_size)
597 truncate_pagecache_range(inode, max(pos, i_size), pos + len);
598}
599
600static int
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700601iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
602 unsigned poff, unsigned plen, unsigned from, unsigned to,
603 struct iomap *iomap)
604{
605 struct bio_vec bvec;
606 struct bio bio;
607
608 if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
609 zero_user_segments(page, poff, from, to, poff + plen);
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700610 iomap_set_range_uptodate(page, poff, plen);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700611 return 0;
612 }
613
614 bio_init(&bio, &bvec, 1);
615 bio.bi_opf = REQ_OP_READ;
616 bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
617 bio_set_dev(&bio, iomap->bdev);
618 __bio_add_page(&bio, page, plen, poff);
619 return submit_bio_wait(&bio);
620}
621
622static int
623__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
624 struct page *page, struct iomap *iomap)
625{
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700626 struct iomap_page *iop = iomap_page_create(inode, page);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700627 loff_t block_size = i_blocksize(inode);
628 loff_t block_start = pos & ~(block_size - 1);
629 loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700630 unsigned from = offset_in_page(pos), to = from + len, poff, plen;
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700631 int status = 0;
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700632
633 if (PageUptodate(page))
634 return 0;
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700635
636 do {
637 iomap_adjust_read_range(inode, iop, &block_start,
638 block_end - block_start, &poff, &plen);
639 if (plen == 0)
640 break;
641
642 if ((from > poff && from < poff + plen) ||
643 (to > poff && to < poff + plen)) {
644 status = iomap_read_page_sync(inode, block_start, page,
645 poff, plen, from, to, iomap);
646 if (status)
647 break;
648 }
649
650 } while ((block_start += plen) < block_end);
651
652 return status;
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700653}
654
655static int
Christoph Hellwigae259a92016-06-21 09:23:11 +1000656iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
657 struct page **pagep, struct iomap *iomap)
658{
Andreas Gruenbacherdf0db3e2019-04-30 08:45:34 -0700659 const struct iomap_page_ops *page_ops = iomap->page_ops;
Christoph Hellwigae259a92016-06-21 09:23:11 +1000660 pgoff_t index = pos >> PAGE_SHIFT;
661 struct page *page;
662 int status = 0;
663
664 BUG_ON(pos + len > iomap->offset + iomap->length);
665
Michal Hockod1908f52017-02-03 13:13:26 -0800666 if (fatal_signal_pending(current))
667 return -EINTR;
668
Andreas Gruenbacherdf0db3e2019-04-30 08:45:34 -0700669 if (page_ops && page_ops->page_prepare) {
670 status = page_ops->page_prepare(inode, pos, len, iomap);
671 if (status)
672 return status;
673 }
674
Christoph Hellwigae259a92016-06-21 09:23:11 +1000675 page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
Andreas Gruenbacherdf0db3e2019-04-30 08:45:34 -0700676 if (!page) {
677 status = -ENOMEM;
678 goto out_no_page;
679 }
Christoph Hellwigae259a92016-06-21 09:23:11 +1000680
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700681 if (iomap->type == IOMAP_INLINE)
682 iomap_read_inline_data(inode, page, iomap);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700683 else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700684 status = __block_write_begin_int(page, pos, len, NULL, iomap);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700685 else
686 status = __iomap_write_begin(inode, pos, len, page, iomap);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000687
Andreas Gruenbacherdf0db3e2019-04-30 08:45:34 -0700688 if (unlikely(status))
689 goto out_unlock;
Christoph Hellwigae259a92016-06-21 09:23:11 +1000690
691 *pagep = page;
Andreas Gruenbacherdf0db3e2019-04-30 08:45:34 -0700692 return 0;
693
694out_unlock:
695 unlock_page(page);
696 put_page(page);
697 iomap_write_failed(inode, pos, len);
698
699out_no_page:
700 if (page_ops && page_ops->page_done)
701 page_ops->page_done(inode, pos, 0, NULL, iomap);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000702 return status;
703}
704
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700705int
706iomap_set_page_dirty(struct page *page)
707{
708 struct address_space *mapping = page_mapping(page);
709 int newly_dirty;
710
711 if (unlikely(!mapping))
712 return !TestSetPageDirty(page);
713
714 /*
715 * Lock out page->mem_cgroup migration to keep PageDirty
716 * synchronized with per-memcg dirty page counters.
717 */
718 lock_page_memcg(page);
719 newly_dirty = !TestSetPageDirty(page);
720 if (newly_dirty)
721 __set_page_dirty(page, mapping, 0);
722 unlock_page_memcg(page);
723
724 if (newly_dirty)
725 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
726 return newly_dirty;
727}
728EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
729
730static int
731__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
732 unsigned copied, struct page *page, struct iomap *iomap)
733{
734 flush_dcache_page(page);
735
736 /*
737 * The blocks that were entirely written will now be uptodate, so we
738 * don't have to worry about a readpage reading them and overwriting a
739 * partial write. However if we have encountered a short write and only
740 * partially written into a block, it will not be marked uptodate, so a
741 * readpage might come in and destroy our partial write.
742 *
743 * Do the simplest thing, and just treat any short write to a non
744 * uptodate page as a zero-length write, and force the caller to redo
745 * the whole thing.
746 */
Christoph Hellwigdbc582b2019-04-30 08:45:33 -0700747 if (unlikely(copied < len && !PageUptodate(page)))
748 return 0;
749 iomap_set_range_uptodate(page, offset_in_page(pos), len);
750 iomap_set_page_dirty(page);
751 return copied;
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700752}
753
Christoph Hellwigae259a92016-06-21 09:23:11 +1000754static int
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700755iomap_write_end_inline(struct inode *inode, struct page *page,
756 struct iomap *iomap, loff_t pos, unsigned copied)
757{
758 void *addr;
759
760 WARN_ON_ONCE(!PageUptodate(page));
761 BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));
762
763 addr = kmap_atomic(page);
764 memcpy(iomap->inline_data + pos, addr + pos, copied);
765 kunmap_atomic(addr);
766
767 mark_inode_dirty(inode);
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700768 return copied;
769}
770
Christoph Hellwigae259a92016-06-21 09:23:11 +1000771static int
772iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700773 unsigned copied, struct page *page, struct iomap *iomap)
Christoph Hellwigae259a92016-06-21 09:23:11 +1000774{
Andreas Gruenbacherdf0db3e2019-04-30 08:45:34 -0700775 const struct iomap_page_ops *page_ops = iomap->page_ops;
Andreas Gruenbacher8d3e72a2019-06-27 17:28:40 -0700776 loff_t old_size = inode->i_size;
Christoph Hellwigae259a92016-06-21 09:23:11 +1000777 int ret;
778
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700779 if (iomap->type == IOMAP_INLINE) {
780 ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700781 } else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
Christoph Hellwigdbc582b2019-04-30 08:45:33 -0700782 ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
783 page, NULL);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700784 } else {
785 ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700786 }
787
Andreas Gruenbacher8d3e72a2019-06-27 17:28:40 -0700788 /*
789 * Update the in-memory inode size after copying the data into the page
790 * cache. It's up to the file system to write the updated size to disk,
791 * preferably after I/O completion so that no stale data is exposed.
792 */
793 if (pos + ret > old_size) {
794 i_size_write(inode, pos + ret);
795 iomap->flags |= IOMAP_F_SIZE_CHANGED;
796 }
797 unlock_page(page);
798
799 if (old_size < pos)
800 pagecache_isize_extended(inode, old_size, pos);
Andreas Gruenbacherdf0db3e2019-04-30 08:45:34 -0700801 if (page_ops && page_ops->page_done)
Andreas Gruenbacher36a7347d2019-06-27 17:28:41 -0700802 page_ops->page_done(inode, pos, ret, page, iomap);
Andreas Gruenbacher7a77dad2019-04-30 08:45:34 -0700803 put_page(page);
Christoph Hellwig63899c62018-06-19 15:10:56 -0700804
Christoph Hellwigae259a92016-06-21 09:23:11 +1000805 if (ret < len)
806 iomap_write_failed(inode, pos, len);
807 return ret;
808}
809
810static loff_t
811iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
812 struct iomap *iomap)
813{
814 struct iov_iter *i = data;
815 long status = 0;
816 ssize_t written = 0;
817 unsigned int flags = AOP_FLAG_NOFS;
818
Christoph Hellwigae259a92016-06-21 09:23:11 +1000819 do {
820 struct page *page;
821 unsigned long offset; /* Offset into pagecache page */
822 unsigned long bytes; /* Bytes to write to page */
823 size_t copied; /* Bytes copied from user */
824
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700825 offset = offset_in_page(pos);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000826 bytes = min_t(unsigned long, PAGE_SIZE - offset,
827 iov_iter_count(i));
828again:
829 if (bytes > length)
830 bytes = length;
831
832 /*
833 * Bring in the user page that we will copy from _first_.
834 * Otherwise there's a nasty deadlock on copying from the
835 * same page as we're writing to, without it being marked
836 * up-to-date.
837 *
838 * Not only is this an optimisation, but it is also required
839 * to check that the address is actually valid, when atomic
840 * usercopies are used, below.
841 */
842 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
843 status = -EFAULT;
844 break;
845 }
846
847 status = iomap_write_begin(inode, pos, bytes, flags, &page,
848 iomap);
849 if (unlikely(status))
850 break;
851
852 if (mapping_writably_mapped(inode->i_mapping))
853 flush_dcache_page(page);
854
Christoph Hellwigae259a92016-06-21 09:23:11 +1000855 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000856
857 flush_dcache_page(page);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000858
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700859 status = iomap_write_end(inode, pos, bytes, copied, page,
860 iomap);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000861 if (unlikely(status < 0))
862 break;
863 copied = status;
864
865 cond_resched();
866
867 iov_iter_advance(i, copied);
868 if (unlikely(copied == 0)) {
869 /*
870 * If we were unable to copy any data at all, we must
871 * fall back to a single segment length write.
872 *
873 * If we didn't fallback here, we could livelock
874 * because not all segments in the iov can be copied at
875 * once without a pagefault.
876 */
877 bytes = min_t(unsigned long, PAGE_SIZE - offset,
878 iov_iter_single_seg_count(i));
879 goto again;
880 }
881 pos += copied;
882 written += copied;
883 length -= copied;
884
885 balance_dirty_pages_ratelimited(inode->i_mapping);
886 } while (iov_iter_count(i) && length);
887
888 return written ? written : status;
889}
890
891ssize_t
892iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -0800893 const struct iomap_ops *ops)
Christoph Hellwigae259a92016-06-21 09:23:11 +1000894{
895 struct inode *inode = iocb->ki_filp->f_mapping->host;
896 loff_t pos = iocb->ki_pos, ret = 0, written = 0;
897
898 while (iov_iter_count(iter)) {
899 ret = iomap_apply(inode, pos, iov_iter_count(iter),
900 IOMAP_WRITE, ops, iter, iomap_write_actor);
901 if (ret <= 0)
902 break;
903 pos += ret;
904 written += ret;
905 }
906
907 return written ? written : ret;
908}
909EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
910
Christoph Hellwig5f4e5752016-09-19 10:12:45 +1000911static struct page *
912__iomap_read_page(struct inode *inode, loff_t offset)
913{
914 struct address_space *mapping = inode->i_mapping;
915 struct page *page;
916
917 page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
918 if (IS_ERR(page))
919 return page;
920 if (!PageUptodate(page)) {
921 put_page(page);
922 return ERR_PTR(-EIO);
923 }
924 return page;
925}
926
927static loff_t
928iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
929 struct iomap *iomap)
930{
931 long status = 0;
932 ssize_t written = 0;
933
934 do {
935 struct page *page, *rpage;
936 unsigned long offset; /* Offset into pagecache page */
937 unsigned long bytes; /* Bytes to write to page */
938
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700939 offset = offset_in_page(pos);
Christoph Hellwige28ae8e2017-08-11 12:45:35 -0700940 bytes = min_t(loff_t, PAGE_SIZE - offset, length);
Christoph Hellwig5f4e5752016-09-19 10:12:45 +1000941
942 rpage = __iomap_read_page(inode, pos);
943 if (IS_ERR(rpage))
944 return PTR_ERR(rpage);
945
946 status = iomap_write_begin(inode, pos, bytes,
Tetsuo Handac718a972017-05-08 15:58:59 -0700947 AOP_FLAG_NOFS, &page, iomap);
Christoph Hellwig5f4e5752016-09-19 10:12:45 +1000948 put_page(rpage);
949 if (unlikely(status))
950 return status;
951
952 WARN_ON_ONCE(!PageUptodate(page));
953
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700954 status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
Christoph Hellwig5f4e5752016-09-19 10:12:45 +1000955 if (unlikely(status <= 0)) {
956 if (WARN_ON_ONCE(status == 0))
957 return -EIO;
958 return status;
959 }
960
961 cond_resched();
962
963 pos += status;
964 written += status;
965 length -= status;
966
967 balance_dirty_pages_ratelimited(inode->i_mapping);
968 } while (length);
969
970 return written;
971}
972
973int
974iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -0800975 const struct iomap_ops *ops)
Christoph Hellwig5f4e5752016-09-19 10:12:45 +1000976{
977 loff_t ret;
978
979 while (len) {
980 ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
981 iomap_dirty_actor);
982 if (ret <= 0)
983 return ret;
984 pos += ret;
985 len -= ret;
986 }
987
988 return 0;
989}
990EXPORT_SYMBOL_GPL(iomap_file_dirty);
991
Christoph Hellwigae259a92016-06-21 09:23:11 +1000992static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
993 unsigned bytes, struct iomap *iomap)
994{
995 struct page *page;
996 int status;
997
Tetsuo Handac718a972017-05-08 15:58:59 -0700998 status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
999 iomap);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001000 if (status)
1001 return status;
1002
1003 zero_user(page, offset, bytes);
1004 mark_page_accessed(page);
1005
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -07001006 return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001007}
1008
Christoph Hellwig9a286f02016-06-21 09:31:39 +10001009static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
1010 struct iomap *iomap)
1011{
Christoph Hellwig57fc5052018-06-01 09:03:08 -07001012 return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
1013 iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
Christoph Hellwig9a286f02016-06-21 09:31:39 +10001014}
1015
Christoph Hellwigae259a92016-06-21 09:23:11 +10001016static loff_t
1017iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
1018 void *data, struct iomap *iomap)
1019{
1020 bool *did_zero = data;
1021 loff_t written = 0;
1022 int status;
1023
1024 /* already zeroed? we're done. */
1025 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1026 return count;
1027
1028 do {
1029 unsigned offset, bytes;
1030
Andreas Gruenbacher10259de2018-08-10 11:46:14 -07001031 offset = offset_in_page(pos);
Christoph Hellwige28ae8e2017-08-11 12:45:35 -07001032 bytes = min_t(loff_t, PAGE_SIZE - offset, count);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001033
Christoph Hellwig9a286f02016-06-21 09:31:39 +10001034 if (IS_DAX(inode))
1035 status = iomap_dax_zero(pos, offset, bytes, iomap);
1036 else
1037 status = iomap_zero(inode, pos, offset, bytes, iomap);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001038 if (status < 0)
1039 return status;
1040
1041 pos += bytes;
1042 count -= bytes;
1043 written += bytes;
1044 if (did_zero)
1045 *did_zero = true;
1046 } while (count > 0);
1047
1048 return written;
1049}
1050
1051int
1052iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -08001053 const struct iomap_ops *ops)
Christoph Hellwigae259a92016-06-21 09:23:11 +10001054{
1055 loff_t ret;
1056
1057 while (len > 0) {
1058 ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
1059 ops, did_zero, iomap_zero_range_actor);
1060 if (ret <= 0)
1061 return ret;
1062
1063 pos += ret;
1064 len -= ret;
1065 }
1066
1067 return 0;
1068}
1069EXPORT_SYMBOL_GPL(iomap_zero_range);
1070
1071int
1072iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -08001073 const struct iomap_ops *ops)
Christoph Hellwigae259a92016-06-21 09:23:11 +10001074{
Fabian Frederick93407472017-02-27 14:28:32 -08001075 unsigned int blocksize = i_blocksize(inode);
1076 unsigned int off = pos & (blocksize - 1);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001077
1078 /* Block boundary? Nothing to do */
1079 if (!off)
1080 return 0;
1081 return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
1082}
1083EXPORT_SYMBOL_GPL(iomap_truncate_page);
1084
1085static loff_t
1086iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
1087 void *data, struct iomap *iomap)
1088{
1089 struct page *page = data;
1090 int ret;
1091
Christoph Hellwigc03cea42018-06-19 15:10:58 -07001092 if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
1093 ret = __block_write_begin_int(page, pos, length, NULL, iomap);
1094 if (ret)
1095 return ret;
1096 block_commit_write(page, 0, length);
1097 } else {
1098 WARN_ON_ONCE(!PageUptodate(page));
Christoph Hellwig9dc55f12018-07-11 22:26:05 -07001099 iomap_page_create(inode, page);
Brian Foster561295a2018-09-29 13:51:01 +10001100 set_page_dirty(page);
Christoph Hellwigc03cea42018-06-19 15:10:58 -07001101 }
Christoph Hellwigae259a92016-06-21 09:23:11 +10001102
Christoph Hellwigae259a92016-06-21 09:23:11 +10001103 return length;
1104}
1105
Souptick Joarder5780a022018-10-26 15:02:59 -07001106vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
Christoph Hellwigae259a92016-06-21 09:23:11 +10001107{
1108 struct page *page = vmf->page;
Dave Jiang11bac802017-02-24 14:56:41 -08001109 struct inode *inode = file_inode(vmf->vma->vm_file);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001110 unsigned long length;
1111 loff_t offset, size;
1112 ssize_t ret;
1113
1114 lock_page(page);
1115 size = i_size_read(inode);
1116 if ((page->mapping != inode->i_mapping) ||
1117 (page_offset(page) > size)) {
1118 /* We overload EFAULT to mean page got truncated */
1119 ret = -EFAULT;
1120 goto out_unlock;
1121 }
1122
1123 /* page is wholly or partially inside EOF */
1124 if (((page->index + 1) << PAGE_SHIFT) > size)
Andreas Gruenbacher10259de2018-08-10 11:46:14 -07001125 length = offset_in_page(size);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001126 else
1127 length = PAGE_SIZE;
1128
1129 offset = page_offset(page);
1130 while (length > 0) {
Jan Kara9484ab12016-11-10 10:26:50 +11001131 ret = iomap_apply(inode, offset, length,
1132 IOMAP_WRITE | IOMAP_FAULT, ops, page,
1133 iomap_page_mkwrite_actor);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001134 if (unlikely(ret <= 0))
1135 goto out_unlock;
1136 offset += ret;
1137 length -= ret;
1138 }
1139
Christoph Hellwigae259a92016-06-21 09:23:11 +10001140 wait_for_stable_page(page);
Christoph Hellwige7647fb2017-08-29 10:08:41 -07001141 return VM_FAULT_LOCKED;
Christoph Hellwigae259a92016-06-21 09:23:11 +10001142out_unlock:
1143 unlock_page(page);
Christoph Hellwige7647fb2017-08-29 10:08:41 -07001144 return block_page_mkwrite_return(ret);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001145}
1146EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001147
1148struct fiemap_ctx {
1149 struct fiemap_extent_info *fi;
1150 struct iomap prev;
1151};
1152
1153static int iomap_to_fiemap(struct fiemap_extent_info *fi,
1154 struct iomap *iomap, u32 flags)
1155{
1156 switch (iomap->type) {
1157 case IOMAP_HOLE:
1158 /* skip holes */
1159 return 0;
1160 case IOMAP_DELALLOC:
1161 flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
1162 break;
Christoph Hellwig19319b52018-06-01 09:03:06 -07001163 case IOMAP_MAPPED:
1164 break;
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001165 case IOMAP_UNWRITTEN:
1166 flags |= FIEMAP_EXTENT_UNWRITTEN;
1167 break;
Christoph Hellwig19319b52018-06-01 09:03:06 -07001168 case IOMAP_INLINE:
1169 flags |= FIEMAP_EXTENT_DATA_INLINE;
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001170 break;
1171 }
1172
Christoph Hellwig17de0a92016-08-29 11:33:58 +10001173 if (iomap->flags & IOMAP_F_MERGED)
1174 flags |= FIEMAP_EXTENT_MERGED;
Darrick J. Wonge43c4602016-09-19 10:13:02 +10001175 if (iomap->flags & IOMAP_F_SHARED)
1176 flags |= FIEMAP_EXTENT_SHARED;
Christoph Hellwig17de0a92016-08-29 11:33:58 +10001177
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001178 return fiemap_fill_next_extent(fi, iomap->offset,
Andreas Gruenbacher19fe5f62017-10-01 17:55:54 -04001179 iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
Christoph Hellwig17de0a92016-08-29 11:33:58 +10001180 iomap->length, flags);
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001181}
1182
1183static loff_t
1184iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1185 struct iomap *iomap)
1186{
1187 struct fiemap_ctx *ctx = data;
1188 loff_t ret = length;
1189
1190 if (iomap->type == IOMAP_HOLE)
1191 return length;
1192
1193 ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
1194 ctx->prev = *iomap;
1195 switch (ret) {
1196 case 0: /* success */
1197 return length;
1198 case 1: /* extent array full */
1199 return 0;
1200 default:
1201 return ret;
1202 }
1203}
1204
1205int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -08001206 loff_t start, loff_t len, const struct iomap_ops *ops)
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001207{
1208 struct fiemap_ctx ctx;
1209 loff_t ret;
1210
1211 memset(&ctx, 0, sizeof(ctx));
1212 ctx.fi = fi;
1213 ctx.prev.type = IOMAP_HOLE;
1214
1215 ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
1216 if (ret)
1217 return ret;
1218
Dave Chinner8896b8f2016-08-17 08:41:10 +10001219 if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
1220 ret = filemap_write_and_wait(inode->i_mapping);
1221 if (ret)
1222 return ret;
1223 }
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001224
1225 while (len > 0) {
Christoph Hellwigd33fd772016-10-20 15:51:28 +11001226 ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001227 iomap_fiemap_actor);
Dave Chinnerac2dc052016-08-17 08:41:34 +10001228 /* inode with no (attribute) mapping will give ENOENT */
1229 if (ret == -ENOENT)
1230 break;
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001231 if (ret < 0)
1232 return ret;
1233 if (ret == 0)
1234 break;
1235
1236 start += ret;
1237 len -= ret;
1238 }
1239
1240 if (ctx.prev.type != IOMAP_HOLE) {
1241 ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
1242 if (ret < 0)
1243 return ret;
1244 }
1245
1246 return 0;
1247}
1248EXPORT_SYMBOL_GPL(iomap_fiemap);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001249
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001250/*
1251 * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001252 * Returns true if found and updates @lastoff to the offset in file.
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001253 */
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001254static bool
1255page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
1256 int whence)
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001257{
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001258 const struct address_space_operations *ops = inode->i_mapping->a_ops;
1259 unsigned int bsize = i_blocksize(inode), off;
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001260 bool seek_data = whence == SEEK_DATA;
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001261 loff_t poff = page_offset(page);
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001262
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001263 if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
1264 return false;
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001265
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001266 if (*lastoff < poff) {
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001267 /*
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001268 * Last offset smaller than the start of the page means we found
1269 * a hole:
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001270 */
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001271 if (whence == SEEK_HOLE)
1272 return true;
1273 *lastoff = poff;
1274 }
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001275
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001276 /*
1277 * Just check the page unless we can and should check block ranges:
1278 */
1279 if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
1280 return PageUptodate(page) == seek_data;
1281
1282 lock_page(page);
1283 if (unlikely(page->mapping != inode->i_mapping))
1284 goto out_unlock_not_found;
1285
1286 for (off = 0; off < PAGE_SIZE; off += bsize) {
Andreas Gruenbacher10259de2018-08-10 11:46:14 -07001287 if (offset_in_page(*lastoff) >= off + bsize)
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001288 continue;
1289 if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
1290 unlock_page(page);
1291 return true;
1292 }
1293 *lastoff = poff + off + bsize;
1294 }
1295
1296out_unlock_not_found:
1297 unlock_page(page);
1298 return false;
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001299}
1300
1301/*
1302 * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
1303 *
1304 * Within unwritten extents, the page cache determines which parts are holes
Christoph Hellwigbd56b3e2018-06-01 09:05:14 -07001305 * and which are data: uptodate buffer heads count as data; everything else
1306 * counts as a hole.
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001307 *
1308 * Returns the resulting offset on successs, and -ENOENT otherwise.
1309 */
1310static loff_t
1311page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
1312 int whence)
1313{
1314 pgoff_t index = offset >> PAGE_SHIFT;
1315 pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
1316 loff_t lastoff = offset;
1317 struct pagevec pvec;
1318
1319 if (length <= 0)
1320 return -ENOENT;
1321
1322 pagevec_init(&pvec);
1323
1324 do {
1325 unsigned nr_pages, i;
1326
1327 nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
1328 end - 1);
1329 if (nr_pages == 0)
1330 break;
1331
1332 for (i = 0; i < nr_pages; i++) {
1333 struct page *page = pvec.pages[i];
1334
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001335 if (page_seek_hole_data(inode, page, &lastoff, whence))
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001336 goto check_range;
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001337 lastoff = page_offset(page) + PAGE_SIZE;
1338 }
1339 pagevec_release(&pvec);
1340 } while (index < end);
1341
1342 /* When no page at lastoff and we are not done, we found a hole. */
1343 if (whence != SEEK_HOLE)
1344 goto not_found;
1345
1346check_range:
1347 if (lastoff < offset + length)
1348 goto out;
1349not_found:
1350 lastoff = -ENOENT;
1351out:
1352 pagevec_release(&pvec);
1353 return lastoff;
1354}
1355
1356
Andreas Gruenbacher0ed3b0d2017-06-29 11:43:21 -07001357static loff_t
1358iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
1359 void *data, struct iomap *iomap)
1360{
1361 switch (iomap->type) {
1362 case IOMAP_UNWRITTEN:
1363 offset = page_cache_seek_hole_data(inode, offset, length,
1364 SEEK_HOLE);
1365 if (offset < 0)
1366 return length;
1367 /* fall through */
1368 case IOMAP_HOLE:
1369 *(loff_t *)data = offset;
1370 return 0;
1371 default:
1372 return length;
1373 }
1374}
1375
1376loff_t
1377iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
1378{
1379 loff_t size = i_size_read(inode);
1380 loff_t length = size - offset;
1381 loff_t ret;
1382
Darrick J. Wongd6ab17f2017-07-12 10:26:47 -07001383 /* Nothing to be found before or beyond the end of the file. */
1384 if (offset < 0 || offset >= size)
Andreas Gruenbacher0ed3b0d2017-06-29 11:43:21 -07001385 return -ENXIO;
1386
1387 while (length > 0) {
1388 ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
1389 &offset, iomap_seek_hole_actor);
1390 if (ret < 0)
1391 return ret;
1392 if (ret == 0)
1393 break;
1394
1395 offset += ret;
1396 length -= ret;
1397 }
1398
1399 return offset;
1400}
1401EXPORT_SYMBOL_GPL(iomap_seek_hole);
1402
1403static loff_t
1404iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
1405 void *data, struct iomap *iomap)
1406{
1407 switch (iomap->type) {
1408 case IOMAP_HOLE:
1409 return length;
1410 case IOMAP_UNWRITTEN:
1411 offset = page_cache_seek_hole_data(inode, offset, length,
1412 SEEK_DATA);
1413 if (offset < 0)
1414 return length;
1415 /*FALLTHRU*/
1416 default:
1417 *(loff_t *)data = offset;
1418 return 0;
1419 }
1420}
1421
1422loff_t
1423iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
1424{
1425 loff_t size = i_size_read(inode);
1426 loff_t length = size - offset;
1427 loff_t ret;
1428
Darrick J. Wongd6ab17f2017-07-12 10:26:47 -07001429 /* Nothing to be found before or beyond the end of the file. */
1430 if (offset < 0 || offset >= size)
Andreas Gruenbacher0ed3b0d2017-06-29 11:43:21 -07001431 return -ENXIO;
1432
1433 while (length > 0) {
1434 ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
1435 &offset, iomap_seek_data_actor);
1436 if (ret < 0)
1437 return ret;
1438 if (ret == 0)
1439 break;
1440
1441 offset += ret;
1442 length -= ret;
1443 }
1444
1445 if (length <= 0)
1446 return -ENXIO;
1447 return offset;
1448}
1449EXPORT_SYMBOL_GPL(iomap_seek_data);
1450
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001451/*
1452 * Private flags for iomap_dio, must not overlap with the public ones in
1453 * iomap.h:
1454 */
Dave Chinner3460cac2018-05-02 12:54:53 -07001455#define IOMAP_DIO_WRITE_FUA (1 << 28)
Dave Chinner4f8ff442018-05-02 12:54:52 -07001456#define IOMAP_DIO_NEED_SYNC (1 << 29)
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001457#define IOMAP_DIO_WRITE (1 << 30)
1458#define IOMAP_DIO_DIRTY (1 << 31)
1459
1460struct iomap_dio {
1461 struct kiocb *iocb;
1462 iomap_dio_end_io_t *end_io;
1463 loff_t i_size;
1464 loff_t size;
1465 atomic_t ref;
1466 unsigned flags;
1467 int error;
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001468 bool wait_for_completion;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001469
1470 union {
1471 /* used during submission and for synchronous completion: */
1472 struct {
1473 struct iov_iter *iter;
1474 struct task_struct *waiter;
1475 struct request_queue *last_queue;
1476 blk_qc_t cookie;
1477 } submit;
1478
1479 /* used for aio completion: */
1480 struct {
1481 struct work_struct work;
1482 } aio;
1483 };
1484};
1485
Christoph Hellwig81214ba2018-12-04 11:12:08 -07001486int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
1487{
1488 struct request_queue *q = READ_ONCE(kiocb->private);
1489
1490 if (!q)
1491 return 0;
1492 return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
1493}
1494EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
1495
1496static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
1497 struct bio *bio)
1498{
1499 atomic_inc(&dio->ref);
1500
1501 if (dio->iocb->ki_flags & IOCB_HIPRI)
1502 bio_set_polled(bio, dio->iocb);
1503
1504 dio->submit.last_queue = bdev_get_queue(iomap->bdev);
1505 dio->submit.cookie = submit_bio(bio);
1506}
1507
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001508static ssize_t iomap_dio_complete(struct iomap_dio *dio)
1509{
1510 struct kiocb *iocb = dio->iocb;
Lukas Czerner332391a2017-09-21 08:16:29 -06001511 struct inode *inode = file_inode(iocb->ki_filp);
Eryu Guan5e25c262017-10-13 09:47:46 -07001512 loff_t offset = iocb->ki_pos;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001513 ssize_t ret;
1514
1515 if (dio->end_io) {
1516 ret = dio->end_io(iocb,
1517 dio->error ? dio->error : dio->size,
1518 dio->flags);
1519 } else {
1520 ret = dio->error;
1521 }
1522
1523 if (likely(!ret)) {
1524 ret = dio->size;
1525 /* check for short read */
Eryu Guan5e25c262017-10-13 09:47:46 -07001526 if (offset + ret > dio->i_size &&
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001527 !(dio->flags & IOMAP_DIO_WRITE))
Eryu Guan5e25c262017-10-13 09:47:46 -07001528 ret = dio->i_size - offset;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001529 iocb->ki_pos += ret;
1530 }
1531
Eryu Guan5e25c262017-10-13 09:47:46 -07001532 /*
1533 * Try again to invalidate clean pages which might have been cached by
1534 * non-direct readahead, or faulted in by get_user_pages() if the source
1535 * of the write was an mmap'ed region of the file we're writing. Either
1536 * one is a pretty crazy thing to do, so we don't support it 100%. If
1537 * this invalidation fails, tough, the write still worked...
1538 *
1539 * And this page cache invalidation has to be after dio->end_io(), as
1540 * some filesystems convert unwritten extents to real allocations in
1541 * end_io() when necessary, otherwise a racing buffer read would cache
1542 * zeros from unwritten extents.
1543 */
1544 if (!dio->error &&
1545 (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
1546 int err;
1547 err = invalidate_inode_pages2_range(inode->i_mapping,
1548 offset >> PAGE_SHIFT,
1549 (offset + dio->size - 1) >> PAGE_SHIFT);
Darrick J. Wong5a9d9292018-01-08 10:41:39 -08001550 if (err)
1551 dio_warn_stale_pagecache(iocb->ki_filp);
Eryu Guan5e25c262017-10-13 09:47:46 -07001552 }
1553
Dave Chinner4f8ff442018-05-02 12:54:52 -07001554 /*
1555 * If this is a DSYNC write, make sure we push it to stable storage now
1556 * that we've written data.
1557 */
1558 if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
1559 ret = generic_write_sync(iocb, ret);
1560
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001561 inode_dio_end(file_inode(iocb->ki_filp));
1562 kfree(dio);
1563
1564 return ret;
1565}
1566
1567static void iomap_dio_complete_work(struct work_struct *work)
1568{
1569 struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
1570 struct kiocb *iocb = dio->iocb;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001571
Dave Chinner4f8ff442018-05-02 12:54:52 -07001572 iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001573}
1574
1575/*
1576 * Set an error in the dio if none is set yet. We have to use cmpxchg
1577 * as the submission context and the completion context(s) can race to
1578 * update the error.
1579 */
1580static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
1581{
1582 cmpxchg(&dio->error, 0, ret);
1583}
1584
1585static void iomap_dio_bio_end_io(struct bio *bio)
1586{
1587 struct iomap_dio *dio = bio->bi_private;
1588 bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
1589
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001590 if (bio->bi_status)
1591 iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001592
1593 if (atomic_dec_and_test(&dio->ref)) {
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001594 if (dio->wait_for_completion) {
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001595 struct task_struct *waiter = dio->submit.waiter;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001596 WRITE_ONCE(dio->submit.waiter, NULL);
Jens Axboe06193172018-11-13 21:16:54 -07001597 blk_wake_io_task(waiter);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001598 } else if (dio->flags & IOMAP_DIO_WRITE) {
1599 struct inode *inode = file_inode(dio->iocb->ki_filp);
1600
1601 INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
1602 queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
1603 } else {
1604 iomap_dio_complete_work(&dio->aio.work);
1605 }
1606 }
1607
1608 if (should_dirty) {
1609 bio_check_pages_dirty(bio);
1610 } else {
Jens Axboe399254a2019-02-27 13:13:23 -07001611 if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
1612 struct bvec_iter_all iter_all;
1613 struct bio_vec *bvec;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001614
Christoph Hellwig2b070cf2019-04-25 09:03:00 +02001615 bio_for_each_segment_all(bvec, bio, iter_all)
Jens Axboe399254a2019-02-27 13:13:23 -07001616 put_page(bvec->bv_page);
1617 }
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001618 bio_put(bio);
1619 }
1620}
1621
Christoph Hellwig81214ba2018-12-04 11:12:08 -07001622static void
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001623iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
1624 unsigned len)
1625{
1626 struct page *page = ZERO_PAGE(0);
Jens Axboed1e36282018-08-29 10:36:56 -06001627 int flags = REQ_SYNC | REQ_IDLE;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001628 struct bio *bio;
1629
1630 bio = bio_alloc(GFP_KERNEL, 1);
Christoph Hellwig74d46992017-08-23 19:10:32 +02001631 bio_set_dev(bio, iomap->bdev);
Christoph Hellwig57fc5052018-06-01 09:03:08 -07001632 bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001633 bio->bi_private = dio;
1634 bio->bi_end_io = iomap_dio_bio_end_io;
1635
1636 get_page(page);
Christoph Hellwig6533b4e2018-06-01 09:03:07 -07001637 __bio_add_page(bio, page, len, 0);
Jens Axboed1e36282018-08-29 10:36:56 -06001638 bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
Christoph Hellwig81214ba2018-12-04 11:12:08 -07001639 iomap_dio_submit_bio(dio, iomap, bio);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001640}
1641
1642static loff_t
Christoph Hellwig09230432018-07-03 09:07:46 -07001643iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
1644 struct iomap_dio *dio, struct iomap *iomap)
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001645{
Fabian Frederick93407472017-02-27 14:28:32 -08001646 unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
1647 unsigned int fs_block_size = i_blocksize(inode), pad;
1648 unsigned int align = iov_iter_alignment(dio->submit.iter);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001649 struct iov_iter iter;
1650 struct bio *bio;
1651 bool need_zeroout = false;
Dave Chinner3460cac2018-05-02 12:54:53 -07001652 bool use_fua = false;
Dave Chinner4721a602018-11-19 13:31:11 -08001653 int nr_pages, ret = 0;
Al Virocfe057f2017-09-11 21:17:09 +01001654 size_t copied = 0;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001655
1656 if ((pos | length | align) & ((1 << blkbits) - 1))
1657 return -EINVAL;
1658
Christoph Hellwig09230432018-07-03 09:07:46 -07001659 if (iomap->type == IOMAP_UNWRITTEN) {
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001660 dio->flags |= IOMAP_DIO_UNWRITTEN;
1661 need_zeroout = true;
Christoph Hellwig09230432018-07-03 09:07:46 -07001662 }
1663
1664 if (iomap->flags & IOMAP_F_SHARED)
1665 dio->flags |= IOMAP_DIO_COW;
1666
1667 if (iomap->flags & IOMAP_F_NEW) {
1668 need_zeroout = true;
Dave Chinner0929d852018-11-19 13:31:10 -08001669 } else if (iomap->type == IOMAP_MAPPED) {
Christoph Hellwig09230432018-07-03 09:07:46 -07001670 /*
Dave Chinner0929d852018-11-19 13:31:10 -08001671		 * Use a FUA write if we need datasync semantics: this is a pure
1672 * data IO that doesn't require any metadata updates (including
1673 * after IO completion such as unwritten extent conversion) and
1674 * the underlying device supports FUA. This allows us to avoid
1675 * cache flushes on IO completion.
Christoph Hellwig09230432018-07-03 09:07:46 -07001676 */
1677 if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
1678 (dio->flags & IOMAP_DIO_WRITE_FUA) &&
1679 blk_queue_fua(bdev_get_queue(iomap->bdev)))
1680 use_fua = true;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001681 }
1682
1683 /*
1684 * Operate on a partial iter trimmed to the extent we were called for.
1685 * We'll update the iter in the dio once we're done with this extent.
1686 */
1687 iter = *dio->submit.iter;
1688 iov_iter_truncate(&iter, length);
1689
1690 nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
1691 if (nr_pages <= 0)
1692 return nr_pages;
1693
1694 if (need_zeroout) {
1695 /* zero out from the start of the block to the write offset */
1696 pad = pos & (fs_block_size - 1);
1697 if (pad)
1698 iomap_dio_zero(dio, iomap, pos - pad, pad);
1699 }
1700
1701 do {
Al Virocfe057f2017-09-11 21:17:09 +01001702 size_t n;
1703 if (dio->error) {
1704 iov_iter_revert(dio->submit.iter, copied);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001705 return 0;
Al Virocfe057f2017-09-11 21:17:09 +01001706 }
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001707
1708 bio = bio_alloc(GFP_KERNEL, nr_pages);
Christoph Hellwig74d46992017-08-23 19:10:32 +02001709 bio_set_dev(bio, iomap->bdev);
Christoph Hellwig57fc5052018-06-01 09:03:08 -07001710 bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
Jens Axboe45d06cf2017-06-27 11:01:22 -06001711 bio->bi_write_hint = dio->iocb->ki_hint;
Adam Manzanares087e5662018-05-22 10:52:21 -07001712 bio->bi_ioprio = dio->iocb->ki_ioprio;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001713 bio->bi_private = dio;
1714 bio->bi_end_io = iomap_dio_bio_end_io;
1715
1716 ret = bio_iov_iter_get_pages(bio, &iter);
1717 if (unlikely(ret)) {
Dave Chinner4721a602018-11-19 13:31:11 -08001718 /*
1719 * We have to stop part way through an IO. We must fall
1720 * through to the sub-block tail zeroing here, otherwise
1721 * this short IO may expose stale data in the tail of
1722 * the block we haven't written data to.
1723 */
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001724 bio_put(bio);
Dave Chinner4721a602018-11-19 13:31:11 -08001725 goto zero_tail;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001726 }
1727
Al Virocfe057f2017-09-11 21:17:09 +01001728 n = bio->bi_iter.bi_size;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001729 if (dio->flags & IOMAP_DIO_WRITE) {
Dave Chinner3460cac2018-05-02 12:54:53 -07001730 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
1731 if (use_fua)
1732 bio->bi_opf |= REQ_FUA;
1733 else
1734 dio->flags &= ~IOMAP_DIO_WRITE_FUA;
Al Virocfe057f2017-09-11 21:17:09 +01001735 task_io_account_write(n);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001736 } else {
Dave Chinner3460cac2018-05-02 12:54:53 -07001737 bio->bi_opf = REQ_OP_READ;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001738 if (dio->flags & IOMAP_DIO_DIRTY)
1739 bio_set_pages_dirty(bio);
1740 }
1741
Al Virocfe057f2017-09-11 21:17:09 +01001742 iov_iter_advance(dio->submit.iter, n);
1743
1744 dio->size += n;
1745 pos += n;
1746 copied += n;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001747
1748 nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
Christoph Hellwig81214ba2018-12-04 11:12:08 -07001749 iomap_dio_submit_bio(dio, iomap, bio);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001750 } while (nr_pages);
1751
Dave Chinnerb4506722018-11-19 13:31:10 -08001752 /*
1753	 * We need to zero out the tail of a sub-block write if the extent type
1754 * requires zeroing or the write extends beyond EOF. If we don't zero
1755 * the block tail in the latter case, we can expose stale data via mmap
1756 * reads of the EOF block.
1757 */
Dave Chinner4721a602018-11-19 13:31:11 -08001758zero_tail:
Dave Chinnerb4506722018-11-19 13:31:10 -08001759 if (need_zeroout ||
1760 ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001761 /* zero out from the end of the write to the end of the block */
1762 pad = pos & (fs_block_size - 1);
1763 if (pad)
1764 iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
1765 }
Dave Chinner4721a602018-11-19 13:31:11 -08001766 return copied ? copied : ret;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001767}
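
/*
 * Illustrative sketch (not part of the original file): the sub-block padding
 * arithmetic used by iomap_dio_bio_actor() above, written out as hypothetical
 * helpers.  For a 4096-byte filesystem block, a write at pos 4608 of length
 * 8704 has a 512-byte head pad (zero [4096, 4608) before the data) and ends
 * 1024 bytes into its final block, so the remaining 3072 bytes (the range
 * [13312, 16384)) are zeroed after it.
 */
static inline unsigned int myfs_dio_head_pad(loff_t pos, unsigned int blksz)
{
	/* bytes from the start of the block to the start of the write */
	return pos & (blksz - 1);
}

static inline unsigned int myfs_dio_tail_zero(loff_t end, unsigned int blksz)
{
	unsigned int pad = end & (blksz - 1);	/* bytes used in the last block */

	/* bytes still to zero out to the end of the block, 0 if aligned */
	return pad ? blksz - pad : 0;
}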
1768
Christoph Hellwig09230432018-07-03 09:07:46 -07001769static loff_t
1770iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
1771{
1772 length = iov_iter_zero(length, dio->submit.iter);
1773 dio->size += length;
1774 return length;
1775}
1776
1777static loff_t
Andreas Gruenbacherec181f62018-07-03 09:07:47 -07001778iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
1779 struct iomap_dio *dio, struct iomap *iomap)
1780{
1781 struct iov_iter *iter = dio->submit.iter;
1782 size_t copied;
1783
1784 BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));
1785
1786 if (dio->flags & IOMAP_DIO_WRITE) {
1787 loff_t size = inode->i_size;
1788
1789 if (pos > size)
1790 memset(iomap->inline_data + size, 0, pos - size);
1791 copied = copy_from_iter(iomap->inline_data + pos, length, iter);
1792 if (copied) {
1793 if (pos + copied > size)
1794 i_size_write(inode, pos + copied);
1795 mark_inode_dirty(inode);
1796 }
1797 } else {
1798 copied = copy_to_iter(iomap->inline_data + pos, length, iter);
1799 }
1800 dio->size += copied;
1801 return copied;
1802}
1803
1804static loff_t
Christoph Hellwig09230432018-07-03 09:07:46 -07001805iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
1806 void *data, struct iomap *iomap)
1807{
1808 struct iomap_dio *dio = data;
1809
1810 switch (iomap->type) {
1811 case IOMAP_HOLE:
1812 if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
1813 return -EIO;
1814 return iomap_dio_hole_actor(length, dio);
1815 case IOMAP_UNWRITTEN:
1816 if (!(dio->flags & IOMAP_DIO_WRITE))
1817 return iomap_dio_hole_actor(length, dio);
1818 return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
1819 case IOMAP_MAPPED:
1820 return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
Andreas Gruenbacherec181f62018-07-03 09:07:47 -07001821 case IOMAP_INLINE:
1822 return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
Christoph Hellwig09230432018-07-03 09:07:46 -07001823 default:
1824 WARN_ON_ONCE(1);
1825 return -EIO;
1826 }
1827}
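
/*
 * Illustrative sketch (not part of the original file): the kind of mapping a
 * filesystem's ->iomap_begin might return for iomap_dio_actor() to dispatch.
 * myfs_block_lookup() is a hypothetical helper translating a file range into
 * a disk byte address; a real implementation would also report holes,
 * unwritten extents and shared blocks via iomap->type and iomap->flags.
 */
static int myfs_dio_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
		unsigned flags, struct iomap *iomap)
{
	u64 disk_addr;
	u64 mapped_len;

	/* hypothetical lookup of the extent backing this file range */
	if (myfs_block_lookup(inode, pos, length, &disk_addr, &mapped_len))
		return -EIO;

	iomap->type = IOMAP_MAPPED;	/* routed to iomap_dio_bio_actor() */
	iomap->offset = pos;
	iomap->length = mapped_len;	/* may be shorter than requested */
	iomap->addr = disk_addr;	/* disk offset of the mapping, in bytes */
	iomap->bdev = inode->i_sb->s_bdev;
	iomap->flags = 0;
	return 0;
}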
1828
Dave Chinner4f8ff442018-05-02 12:54:52 -07001829/*
1830 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
Dave Chinner3460cac2018-05-02 12:54:53 -07001831 * is being issued as AIO or not. This allows us to optimise pure data writes
1832 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
1833 * REQ_FLUSH post write. This is slightly tricky because a single request here
1834 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
1835 * may be pure data writes. In that case, we still need to do a full data sync
1836 * completion.
Dave Chinner4f8ff442018-05-02 12:54:52 -07001837 */
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001838ssize_t
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -08001839iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1840 const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001841{
1842 struct address_space *mapping = iocb->ki_filp->f_mapping;
1843 struct inode *inode = file_inode(iocb->ki_filp);
1844 size_t count = iov_iter_count(iter);
Eryu Guanc771c142017-03-02 15:02:06 -08001845 loff_t pos = iocb->ki_pos, start = pos;
1846 loff_t end = iocb->ki_pos + count - 1, ret = 0;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001847 unsigned int flags = IOMAP_DIRECT;
Christoph Hellwig4ea899e2019-01-17 08:58:58 -08001848 bool wait_for_completion = is_sync_kiocb(iocb);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001849 struct blk_plug plug;
1850 struct iomap_dio *dio;
1851
1852 lockdep_assert_held(&inode->i_rwsem);
1853
1854 if (!count)
1855 return 0;
1856
1857 dio = kmalloc(sizeof(*dio), GFP_KERNEL);
1858 if (!dio)
1859 return -ENOMEM;
1860
1861 dio->iocb = iocb;
1862 atomic_set(&dio->ref, 1);
1863 dio->size = 0;
1864 dio->i_size = i_size_read(inode);
1865 dio->end_io = end_io;
1866 dio->error = 0;
1867 dio->flags = 0;
1868
1869 dio->submit.iter = iter;
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001870 dio->submit.waiter = current;
1871 dio->submit.cookie = BLK_QC_T_NONE;
1872 dio->submit.last_queue = NULL;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001873
1874 if (iov_iter_rw(iter) == READ) {
1875 if (pos >= dio->i_size)
1876 goto out_free_dio;
1877
David Howells00e23702018-10-22 13:07:28 +01001878		if (iter_is_iovec(iter))
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001879 dio->flags |= IOMAP_DIO_DIRTY;
1880 } else {
Dave Chinner3460cac2018-05-02 12:54:53 -07001881 flags |= IOMAP_WRITE;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001882 dio->flags |= IOMAP_DIO_WRITE;
Dave Chinner3460cac2018-05-02 12:54:53 -07001883
1884 /* for data sync or sync, we need sync completion processing */
Dave Chinner4f8ff442018-05-02 12:54:52 -07001885 if (iocb->ki_flags & IOCB_DSYNC)
1886 dio->flags |= IOMAP_DIO_NEED_SYNC;
Dave Chinner3460cac2018-05-02 12:54:53 -07001887
1888 /*
1889 * For datasync only writes, we optimistically try using FUA for
1890 * this IO. Any non-FUA write that occurs will clear this flag,
1891 * hence we know before completion whether a cache flush is
1892 * necessary.
1893 */
1894 if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
1895 dio->flags |= IOMAP_DIO_WRITE_FUA;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001896 }
1897
Goldwyn Rodriguesa38d1242017-06-20 07:05:45 -05001898 if (iocb->ki_flags & IOCB_NOWAIT) {
1899 if (filemap_range_has_page(mapping, start, end)) {
1900 ret = -EAGAIN;
1901 goto out_free_dio;
1902 }
1903 flags |= IOMAP_NOWAIT;
1904 }
1905
Andrey Ryabinin55635ba2017-05-03 14:55:59 -07001906 ret = filemap_write_and_wait_range(mapping, start, end);
1907 if (ret)
1908 goto out_free_dio;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001909
Darrick J. Wong5a9d9292018-01-08 10:41:39 -08001910 /*
1911 * Try to invalidate cache pages for the range we're direct
1912 * writing. If this invalidation fails, tough, the write will
1913 * still work, but racing two incompatible write paths is a
1914 * pretty crazy thing to do, so we don't support it 100%.
1915 */
Andrey Ryabinin55635ba2017-05-03 14:55:59 -07001916 ret = invalidate_inode_pages2_range(mapping,
1917 start >> PAGE_SHIFT, end >> PAGE_SHIFT);
Darrick J. Wong5a9d9292018-01-08 10:41:39 -08001918 if (ret)
1919 dio_warn_stale_pagecache(iocb->ki_filp);
Andrey Ryabinin55635ba2017-05-03 14:55:59 -07001920 ret = 0;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001921
Christoph Hellwig4ea899e2019-01-17 08:58:58 -08001922 if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
Chandan Rajendra546e7be2017-09-22 11:47:33 -07001923 !inode->i_sb->s_dio_done_wq) {
1924 ret = sb_init_dio_done_wq(inode->i_sb);
1925 if (ret < 0)
1926 goto out_free_dio;
1927 }
1928
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001929 inode_dio_begin(inode);
1930
1931 blk_start_plug(&plug);
1932 do {
1933 ret = iomap_apply(inode, pos, count, flags, ops, dio,
1934 iomap_dio_actor);
1935 if (ret <= 0) {
1936 /* magic error code to fall back to buffered I/O */
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001937 if (ret == -ENOTBLK) {
Christoph Hellwig4ea899e2019-01-17 08:58:58 -08001938 wait_for_completion = true;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001939 ret = 0;
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001940 }
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001941 break;
1942 }
1943 pos += ret;
Chandan Rajendraa008c312017-04-12 11:03:20 -07001944
1945 if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
1946 break;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001947 } while ((count = iov_iter_count(iter)) > 0);
1948 blk_finish_plug(&plug);
1949
1950 if (ret < 0)
1951 iomap_dio_set_error(dio, ret);
1952
Dave Chinner3460cac2018-05-02 12:54:53 -07001953 /*
1954 * If all the writes we issued were FUA, we don't need to flush the
1955 * cache on IO completion. Clear the sync flag for this case.
1956 */
1957 if (dio->flags & IOMAP_DIO_WRITE_FUA)
1958 dio->flags &= ~IOMAP_DIO_NEED_SYNC;
1959
Christoph Hellwig81214ba2018-12-04 11:12:08 -07001960 WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie);
1961 WRITE_ONCE(iocb->private, dio->submit.last_queue);
1962
Christoph Hellwig4ea899e2019-01-17 08:58:58 -08001963 /*
1964 * We are about to drop our additional submission reference, which
1965	 * might be the last reference to the dio. There are three
1966 * different ways we can progress here:
1967 *
1968 * (a) If this is the last reference we will always complete and free
1969 * the dio ourselves.
1970 * (b) If this is not the last reference, and we serve an asynchronous
1971 * iocb, we must never touch the dio after the decrement, the
1972 * I/O completion handler will complete and free it.
1973 * (c) If this is not the last reference, but we serve a synchronous
1974 * iocb, the I/O completion handler will wake us up on the drop
1975 * of the final reference, and we will complete and free it here
1976 * after we got woken by the I/O completion handler.
1977 */
1978 dio->wait_for_completion = wait_for_completion;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001979 if (!atomic_dec_and_test(&dio->ref)) {
Christoph Hellwig4ea899e2019-01-17 08:58:58 -08001980 if (!wait_for_completion)
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001981 return -EIOCBQUEUED;
1982
1983 for (;;) {
Linus Torvalds1ac5cd42019-01-02 10:46:03 -08001984 set_current_state(TASK_UNINTERRUPTIBLE);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001985 if (!READ_ONCE(dio->submit.waiter))
1986 break;
1987
1988 if (!(iocb->ki_flags & IOCB_HIPRI) ||
1989 !dio->submit.last_queue ||
Christoph Hellwigea435e12017-11-02 21:29:54 +03001990 !blk_poll(dio->submit.last_queue,
Jens Axboe0a1b8b82018-11-26 08:24:43 -07001991 dio->submit.cookie, true))
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001992 io_schedule();
1993 }
1994 __set_current_state(TASK_RUNNING);
1995 }
1996
Christoph Hellwig4ea899e2019-01-17 08:58:58 -08001997 return iomap_dio_complete(dio);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001998
1999out_free_dio:
2000 kfree(dio);
2001 return ret;
2002}
2003EXPORT_SYMBOL_GPL(iomap_dio_rw);
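
/*
 * Illustrative sketch (not part of the original file): roughly how a
 * filesystem's direct read path hands off to iomap_dio_rw().  myfs_iomap_ops
 * is a hypothetical iomap_ops instance; end_io may be NULL when no completion
 * work is needed, and i_rwsem must be held (here shared) to satisfy the
 * lockdep assertion above.
 */
static ssize_t myfs_file_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!iov_iter_count(to))
		return 0;	/* nothing to do */

	inode_lock_shared(inode);
	ret = iomap_dio_rw(iocb, to, &myfs_iomap_ops, NULL);
	inode_unlock_shared(inode);

	return ret;
}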
Darrick J. Wong67482122018-05-10 08:38:15 -07002004
2005/* Swapfile activation */
2006
2007#ifdef CONFIG_SWAP
2008struct iomap_swapfile_info {
2009 struct iomap iomap; /* accumulated iomap */
2010 struct swap_info_struct *sis;
2011 uint64_t lowest_ppage; /* lowest physical addr seen (pages) */
2012 uint64_t highest_ppage; /* highest physical addr seen (pages) */
2013 unsigned long nr_pages; /* number of pages collected */
2014 int nr_extents; /* extent count */
2015};
2016
2017/*
2018 * Collect physical extents for this swap file. Physical extents reported to
2019 * the swap code must be trimmed to align to a page boundary. The logical
2020 * offset within the file is irrelevant since the swapfile code maps logical
2021 * page numbers of the swap device to the physical page-aligned extents.
2022 */
2023static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
2024{
2025 struct iomap *iomap = &isi->iomap;
2026 unsigned long nr_pages;
2027 uint64_t first_ppage;
2028 uint64_t first_ppage_reported;
2029 uint64_t next_ppage;
2030 int error;
2031
2032 /*
2033 * Round the start up and the end down so that the physical
2034 * extent aligns to a page boundary.
2035 */
2036 first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
2037 next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
2038 PAGE_SHIFT;
2039
2040 /* Skip too-short physical extents. */
2041 if (first_ppage >= next_ppage)
2042 return 0;
2043 nr_pages = next_ppage - first_ppage;
2044
2045 /*
2046 * Calculate how much swap space we're adding; the first page contains
2047 * the swap header and doesn't count. The mm still wants that first
2048 * page fed to add_swap_extent, however.
2049 */
2050 first_ppage_reported = first_ppage;
2051 if (iomap->offset == 0)
2052 first_ppage_reported++;
2053 if (isi->lowest_ppage > first_ppage_reported)
2054 isi->lowest_ppage = first_ppage_reported;
2055 if (isi->highest_ppage < (next_ppage - 1))
2056 isi->highest_ppage = next_ppage - 1;
2057
2058 /* Add extent, set up for the next call. */
2059 error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
2060 if (error < 0)
2061 return error;
2062 isi->nr_extents += error;
2063 isi->nr_pages += nr_pages;
2064 return 0;
2065}
2066
2067/*
2068 * Accumulate iomaps for this swap file. We have to accumulate iomaps because
2069 * swap only cares about contiguous page-aligned physical extents and makes no
2070 * distinction between written and unwritten extents.
2071 */
2072static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
2073 loff_t count, void *data, struct iomap *iomap)
2074{
2075 struct iomap_swapfile_info *isi = data;
2076 int error;
2077
Christoph Hellwig19319b52018-06-01 09:03:06 -07002078 switch (iomap->type) {
2079 case IOMAP_MAPPED:
2080 case IOMAP_UNWRITTEN:
2081 /* Only real or unwritten extents. */
2082 break;
2083 case IOMAP_INLINE:
2084 /* No inline data. */
Omar Sandovalec601922018-05-16 11:13:34 -07002085 pr_err("swapon: file is inline\n");
2086 return -EINVAL;
Christoph Hellwig19319b52018-06-01 09:03:06 -07002087 default:
Omar Sandovalec601922018-05-16 11:13:34 -07002088 pr_err("swapon: file has unallocated extents\n");
2089 return -EINVAL;
2090 }
Darrick J. Wong67482122018-05-10 08:38:15 -07002091
Omar Sandovalec601922018-05-16 11:13:34 -07002092 /* No uncommitted metadata or shared blocks. */
2093 if (iomap->flags & IOMAP_F_DIRTY) {
2094 pr_err("swapon: file is not committed\n");
2095 return -EINVAL;
2096 }
2097 if (iomap->flags & IOMAP_F_SHARED) {
2098 pr_err("swapon: file has shared extents\n");
2099 return -EINVAL;
2100 }
Darrick J. Wong67482122018-05-10 08:38:15 -07002101
Omar Sandovalec601922018-05-16 11:13:34 -07002102 /* Only one bdev per swap file. */
2103 if (iomap->bdev != isi->sis->bdev) {
2104 pr_err("swapon: file is on multiple devices\n");
2105 return -EINVAL;
2106 }
Darrick J. Wong67482122018-05-10 08:38:15 -07002107
2108 if (isi->iomap.length == 0) {
2109 /* No accumulated extent, so just store it. */
2110 memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
2111 } else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
2112 /* Append this to the accumulated extent. */
2113 isi->iomap.length += iomap->length;
2114 } else {
2115 /* Otherwise, add the retained iomap and store this one. */
2116 error = iomap_swapfile_add_extent(isi);
2117 if (error)
2118 return error;
2119 memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
2120 }
Darrick J. Wong67482122018-05-10 08:38:15 -07002121 return count;
Darrick J. Wong67482122018-05-10 08:38:15 -07002122}
2123
2124/*
2125 * Iterate a swap file's iomaps to construct physical extents that can be
2126 * passed to the swapfile subsystem.
2127 */
2128int iomap_swapfile_activate(struct swap_info_struct *sis,
2129 struct file *swap_file, sector_t *pagespan,
2130 const struct iomap_ops *ops)
2131{
2132 struct iomap_swapfile_info isi = {
2133 .sis = sis,
2134 .lowest_ppage = (sector_t)-1ULL,
2135 };
2136 struct address_space *mapping = swap_file->f_mapping;
2137 struct inode *inode = mapping->host;
2138 loff_t pos = 0;
2139 loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
2140 loff_t ret;
2141
Darrick J. Wong117a1482018-06-05 09:53:05 -07002142 /*
2143 * Persist all file mapping metadata so that we won't have any
2144 * IOMAP_F_DIRTY iomaps.
2145 */
2146 ret = vfs_fsync(swap_file, 1);
Darrick J. Wong67482122018-05-10 08:38:15 -07002147 if (ret)
2148 return ret;
2149
2150 while (len > 0) {
2151 ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
2152 ops, &isi, iomap_swapfile_activate_actor);
2153 if (ret <= 0)
2154 return ret;
2155
2156 pos += ret;
2157 len -= ret;
2158 }
2159
2160 if (isi.iomap.length) {
2161 ret = iomap_swapfile_add_extent(&isi);
2162 if (ret)
2163 return ret;
2164 }
2165
2166 *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
2167 sis->max = isi.nr_pages;
2168 sis->pages = isi.nr_pages - 1;
2169 sis->highest_bit = isi.nr_pages - 1;
2170 return isi.nr_extents;
2171}
2172EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
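
/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * ->swap_activate address_space operation is typically a thin wrapper around
 * iomap_swapfile_activate().  myfs_iomap_ops is a hypothetical iomap_ops
 * instance; sis->bdev must be set before the call so the single-device check
 * in the actor above can succeed.
 */
static int myfs_swap_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *span)
{
	sis->bdev = file_inode(swap_file)->i_sb->s_bdev;
	return iomap_swapfile_activate(sis, swap_file, span, &myfs_iomap_ops);
}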
2173#endif /* CONFIG_SWAP */
Christoph Hellwig89eb1902018-06-01 09:03:08 -07002174
2175static loff_t
2176iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
2177 void *data, struct iomap *iomap)
2178{
2179 sector_t *bno = data, addr;
2180
2181 if (iomap->type == IOMAP_MAPPED) {
2182 addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
2183 if (addr > INT_MAX)
2184 WARN(1, "would truncate bmap result\n");
2185 else
2186 *bno = addr;
2187 }
2188 return 0;
2189}
2190
2191/* legacy ->bmap interface. 0 is the error return (!) */
2192sector_t
2193iomap_bmap(struct address_space *mapping, sector_t bno,
2194 const struct iomap_ops *ops)
2195{
2196 struct inode *inode = mapping->host;
Eric Sandeen79b3dbe2018-08-02 13:09:27 -07002197 loff_t pos = bno << inode->i_blkbits;
Christoph Hellwig89eb1902018-06-01 09:03:08 -07002198 unsigned blocksize = i_blocksize(inode);
2199
2200 if (filemap_write_and_wait(mapping))
2201 return 0;
2202
2203 bno = 0;
2204 iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
2205 return bno;
2206}
2207EXPORT_SYMBOL_GPL(iomap_bmap);
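
/*
 * Illustrative sketch (not part of the original file): a filesystem's ->bmap
 * method is usually just a wrapper around iomap_bmap().  myfs_iomap_ops is a
 * hypothetical iomap_ops instance; remember that 0 is the error return of
 * this legacy interface, as noted above.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &myfs_iomap_ops);
}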