/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>

#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and, most importantly, avoids the need for filesystem-specific
 * locking per page. Instead, all the operations are amortised over the entire
 * range of pages. It is assumed that the filesystems will lock whatever
 * resources they require in the iomap_begin call, and release them in the
 * iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, we cannot fail, otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;
	if (WARN_ON(iomap.length == 0))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied. This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}
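
/*
 * Illustrative sketch (not part of the original file): the actor contract
 * used with iomap_apply() above.  An actor is called once per extent
 * returned by ->iomap_begin and reports how many bytes of that extent it
 * consumed, or a negative errno.  All example_* names are hypothetical; it
 * would be driven by something like
 *	iomap_apply(inode, pos, len, IOMAP_REPORT, ops, &total,
 *		    example_count_actor);
 */
static loff_t
example_count_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	loff_t *total = data;

	/* A real actor would copy data or build bios for this extent. */
	if (iomap->type == IOMAP_MAPPED)
		*total += length;
	return length;		/* tell iomap_apply() we consumed it all */
}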

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin_int(page, pos, len, NULL, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page)
{
	int ret;

	ret = generic_write_end(NULL, inode->i_mapping, pos, len,
			copied, page, NULL);
	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
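
/*
 * Illustrative sketch (not part of the original file): how a filesystem
 * might wire iomap_file_buffered_write() into its ->write_iter method,
 * loosely modelled on what XFS does.  example_iomap_ops stands in for the
 * filesystem's real iomap_ops and is assumed to be defined elsewhere.
 */
extern const struct iomap_ops example_iomap_ops;	/* hypothetical */

static ssize_t
example_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from,
				&example_iomap_ops);
	inode_unlock(inode);

	/* Honour O_(D)SYNC and friends once the copy-in has succeeded. */
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}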

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);
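
/*
 * Illustrative sketch (not part of the original file): iomap_file_dirty()
 * is the helper a copy-on-write filesystem would call to "unshare" a range,
 * reading the shared blocks through the page cache and redirtying them so
 * that writeback allocates new, unshared blocks.  Hypothetical names;
 * example_iomap_ops is declared with the earlier sketch.
 */
static int
example_unshare_range(struct inode *inode, loff_t pos, loff_t len)
{
	int ret;

	ret = iomap_file_dirty(inode, pos, len, &example_iomap_ops);
	if (ret)
		return ret;

	/* Push the newly dirtied pages to disk to complete the unshare. */
	return filemap_write_and_wait_range(inode->i_mapping, pos,
			pos + len - 1);
}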

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
				   iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	sector_t sector = (iomap->addr +
			   (pos & PAGE_MASK) - iomap->offset) >> 9;

	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector,
			offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = pos & (PAGE_SIZE - 1); /* Within page */
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
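
/*
 * Illustrative sketch (not part of the original file): a filesystem
 * extending i_size (e.g. on a size-increasing truncate) could use
 * iomap_zero_range() to zero the bytes between the old and the new EOF so
 * stale block contents are never exposed.  Hypothetical names.
 */
static int
example_zero_eof(struct inode *inode, loff_t newsize, loff_t oldsize)
{
	bool did_zero = false;

	return iomap_zero_range(inode, oldsize, newsize - oldsize,
			&did_zero, &example_iomap_ops);
}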

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
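
/*
 * Illustrative sketch (not part of the original file): on a shrinking
 * truncate, the partial block at the new EOF must be zeroed so stale data
 * does not reappear if the file later grows back; iomap_truncate_page()
 * does exactly that.  Hypothetical names.
 */
static int
example_truncate_tail_block(struct inode *inode, loff_t newsize)
{
	bool did_zero = false;

	return iomap_truncate_page(inode, newsize, &did_zero,
			&example_iomap_ops);
}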

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	ret = __block_write_begin_int(page, pos, length, NULL, iomap);
	if (ret)
		return ret;

	block_commit_write(page, 0, length);
	return length;
}

int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = size & ~PAGE_MASK;
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	set_page_dirty(page);
	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
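
/*
 * Illustrative sketch (not part of the original file): wiring
 * iomap_page_mkwrite() into a filesystem's vm_operations_struct, with the
 * usual freeze protection and timestamp update around the fault.
 * Hypothetical names; filemap_fault and filemap_map_pages are the stock
 * helpers used for the read side.
 */
static int
example_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int ret;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	ret = iomap_page_mkwrite(vmf, &example_iomap_ops);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= example_page_mkwrite,
};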

struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_MAPPED:
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_INLINE:
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
			iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, const struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);
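
/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * ->fiemap method can simply delegate to iomap_fiemap(), which walks the
 * range with IOMAP_REPORT and converts each iomap into a fiemap extent.
 * Hypothetical names.
 */
static int
example_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return iomap_fiemap(inode, fieinfo, start, len, &example_iomap_ops);
}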

static loff_t
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
		      void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_HOLE);
		if (offset < 0)
			return length;
		/* fall through */
	case IOMAP_HOLE:
		*(loff_t *)data = offset;
		return 0;
	default:
		return length;
	}
}

loff_t
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_hole_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);

static loff_t
iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
		      void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		return length;
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_DATA);
		if (offset < 0)
			return length;
		/* fall through */
	default:
		*(loff_t *)data = offset;
		return 0;
	}
}

loff_t
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_data_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	if (length <= 0)
		return -ENXIO;
	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_data);
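
/*
 * Illustrative sketch (not part of the original file): handling SEEK_HOLE
 * and SEEK_DATA in a filesystem's ->llseek by delegating to the helpers
 * above, then clamping the result with vfs_setpos().  Hypothetical names.
 */
static loff_t
example_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;

	switch (whence) {
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &example_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &example_iomap_ops);
		break;
	default:
		return generic_file_llseek(file, offset, whence);
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}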

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the
	 * source of the write was an mmap'ed region of the file we're writing.
	 * Either one is a pretty crazy thing to do, so we don't support it
	 * 100%.  If this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after dio->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (is_sync_kiocb(dio->iocb)) {
			struct task_struct *waiter = dio->submit.waiter;

			WRITE_ONCE(dio->submit.waiter, NULL);
			wake_up_process(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}

static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector =
		(iomap->addr + pos - iomap->offset) >> 9;
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	if (bio_add_page(bio, page, len, 0) != len)
		BUG();
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret;
	size_t copied = 0;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		/* fall through */
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE)) {
			length = iov_iter_zero(length, dio->submit.iter);
			dio->size += length;
			return length;
		}
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
		break;
	case IOMAP_MAPPED:
		if (iomap->flags & IOMAP_F_SHARED)
			dio->flags |= IOMAP_DIO_COW;
		if (iomap->flags & IOMAP_F_NEW) {
			need_zeroout = true;
		} else {
			/*
			 * Use a FUA write if we need datasync semantics, this
			 * is a pure data IO that doesn't require any metadata
			 * updates and the underlying device supports FUA. This
			 * allows us to avoid cache flushes on IO completion.
			 */
			if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
			    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
			    blk_queue_fua(bdev_get_queue(iomap->bdev)))
				use_fua = true;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			return 0;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector =
			(iomap->addr + pos - iomap->offset) >> 9;
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return copied ? copied : ret;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
			if (use_fua)
				bio->bi_opf |= REQ_FUA;
			else
				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
			task_io_account_write(n);
		} else {
			bio->bi_opf = REQ_OP_READ;
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		iov_iter_advance(dio->submit.iter, n);

		dio->size += n;
		pos += n;
		copied += n;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

		atomic_inc(&dio->ref);

		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
		dio->submit.cookie = submit_bio(bio);
	} while (nr_pages);

	if (need_zeroout) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
	return copied;
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the
 * IO is being issued as AIO or not.  This allows us to optimise pure data
 * writes to use REQ_FUA rather than requiring generic_write_sync() to issue
 * a REQ_FLUSH post write.  This is slightly tricky because a single request
 * here can be mapped into multiple disjoint IOs and only a subset of the IOs
 * issued may be pure data writes.  In that case, we still need to do a full
 * data sync completion.
 */
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	if (is_sync_kiocb(iocb)) {
		dio->submit.waiter = current;
		dio->submit.cookie = BLK_QC_T_NONE;
		dio->submit.last_queue = NULL;
	}

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter->type == ITER_IOVEC)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;

		/*
		 * For datasync only writes, we optimistically try using FUA
		 * for this IO.  Any non-FUA write that occurs will clear this
		 * flag, hence we know before completion whether a cache flush
		 * is necessary.
		 */
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, start, end)) {
			ret = -EAGAIN;
			goto out_free_dio;
		}
		flags |= IOMAP_NOWAIT;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);
	if (ret)
		goto out_free_dio;

	/*
	 * Try to invalidate cache pages for the range we're direct
	 * writing.  If this invalidation fails, tough, the write will
	 * still work, but racing two incompatible write paths is a
	 * pretty crazy thing to do, so we don't support it 100%.
	 */
	ret = invalidate_inode_pages2_range(mapping,
			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	if (ret)
		dio_warn_stale_pagecache(iocb->ki_filp);
	ret = 0;

	if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
	    !inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			goto out_free_dio;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK)
				ret = 0;
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
			break;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion.  Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	if (!atomic_dec_and_test(&dio->ref)) {
		if (!is_sync_kiocb(iocb))
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					dio->submit.cookie))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	ret = iomap_dio_complete(dio);

	return ret;

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
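
/*
 * Illustrative sketch (not part of the original file): a direct I/O read
 * path built on iomap_dio_rw().  The caller must hold i_rwsem (shared is
 * enough for reads); passing a NULL end_io is fine when no unwritten
 * extent conversion or other completion work is needed.  Hypothetical
 * names.
 */
static ssize_t
example_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!iov_iter_count(to))
		return 0;

	inode_lock_shared(inode);
	ret = iomap_dio_rw(iocb, to, &example_iomap_ops, NULL);
	inode_unlock_shared(inode);
	return ret;
}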

/* Swapfile activation */

#ifdef CONFIG_SWAP
struct iomap_swapfile_info {
	struct iomap iomap;		/* accumulated iomap */
	struct swap_info_struct *sis;
	uint64_t lowest_ppage;		/* lowest physical addr seen (pages) */
	uint64_t highest_ppage;		/* highest physical addr seen (pages) */
	unsigned long nr_pages;		/* number of pages collected */
	int nr_extents;			/* extent count */
};

/*
 * Collect physical extents for this swap file.  Physical extents reported to
 * the swap code must be trimmed to align to a page boundary.  The logical
 * offset within the file is irrelevant since the swapfile code maps logical
 * page numbers of the swap device to the physical page-aligned extents.
 */
static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
{
	struct iomap *iomap = &isi->iomap;
	unsigned long nr_pages;
	uint64_t first_ppage;
	uint64_t first_ppage_reported;
	uint64_t next_ppage;
	int error;

	/*
	 * Round the start up and the end down so that the physical
	 * extent aligns to a page boundary.
	 */
	first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
	next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
			PAGE_SHIFT;

	/* Skip too-short physical extents. */
	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;

	/*
	 * Calculate how much swap space we're adding; the first page contains
	 * the swap header and doesn't count.  The mm still wants that first
	 * page fed to add_swap_extent, however.
	 */
	first_ppage_reported = first_ppage;
	if (iomap->offset == 0)
		first_ppage_reported++;
	if (isi->lowest_ppage > first_ppage_reported)
		isi->lowest_ppage = first_ppage_reported;
	if (isi->highest_ppage < (next_ppage - 1))
		isi->highest_ppage = next_ppage - 1;

	/* Add extent, set up for the next call. */
	error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
	if (error < 0)
		return error;
	isi->nr_extents += error;
	isi->nr_pages += nr_pages;
	return 0;
}

/*
 * Accumulate iomaps for this swap file.  We have to accumulate iomaps because
 * swap only cares about contiguous page-aligned physical extents and makes no
 * distinction between written and unwritten extents.
 */
static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
		loff_t count, void *data, struct iomap *iomap)
{
	struct iomap_swapfile_info *isi = data;
	int error;

	switch (iomap->type) {
	case IOMAP_MAPPED:
	case IOMAP_UNWRITTEN:
		/* Only real or unwritten extents. */
		break;
	case IOMAP_INLINE:
		/* No inline data. */
		pr_err("swapon: file is inline\n");
		return -EINVAL;
	default:
		pr_err("swapon: file has unallocated extents\n");
		return -EINVAL;
	}

	/* No uncommitted metadata or shared blocks. */
	if (iomap->flags & IOMAP_F_DIRTY) {
		pr_err("swapon: file is not committed\n");
		return -EINVAL;
	}
	if (iomap->flags & IOMAP_F_SHARED) {
		pr_err("swapon: file has shared extents\n");
		return -EINVAL;
	}

	/* Only one bdev per swap file. */
	if (iomap->bdev != isi->sis->bdev) {
		pr_err("swapon: file is on multiple devices\n");
		return -EINVAL;
	}

	if (isi->iomap.length == 0) {
		/* No accumulated extent, so just store it. */
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	} else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
		/* Append this to the accumulated extent. */
		isi->iomap.length += iomap->length;
	} else {
		/* Otherwise, add the retained iomap and store this one. */
		error = iomap_swapfile_add_extent(isi);
		if (error)
			return error;
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	}
	return count;
}

/*
 * Iterate a swap file's iomaps to construct physical extents that can be
 * passed to the swapfile subsystem.
 */
int iomap_swapfile_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *pagespan,
		const struct iomap_ops *ops)
{
	struct iomap_swapfile_info isi = {
		.sis = sis,
		.lowest_ppage = (sector_t)-1ULL,
	};
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = 0;
	loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
	loff_t ret;

	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret)
		return ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
				ops, &isi, iomap_swapfile_activate_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	if (isi.iomap.length) {
		ret = iomap_swapfile_add_extent(&isi);
		if (ret)
			return ret;
	}

	*pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
	sis->max = isi.nr_pages;
	sis->pages = isi.nr_pages - 1;
	sis->highest_bit = isi.nr_pages - 1;
	return isi.nr_extents;
}
EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
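
/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * address_space operation ->swap_activate can delegate straight to
 * iomap_swapfile_activate().  Hypothetical names.
 */
static int
example_swap_activate(struct swap_info_struct *sis, struct file *swap_file,
		sector_t *span)
{
	return iomap_swapfile_activate(sis, swap_file, span,
			&example_iomap_ops);
}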
#endif /* CONFIG_SWAP */