// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Christoph Hellwig.
 */
#include "xfs.h"

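/* Number of bio_vecs needed for @count bytes, capped at the bio maximum. */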
static inline unsigned int bio_max_vecs(unsigned int count)
{
	return bio_max_segs(howmany(count, PAGE_SIZE));
}

static void
xfs_flush_bdev_async_endio(
	struct bio		*bio)
{
	complete(bio->bi_private);
}

/*
 * Submit a request for an async cache flush to run. If the request queue does
 * not require flush operations, just skip it altogether. If the caller needs
 * to wait for the flush completion at a later point in time, they must supply
 * a valid completion. This will be signalled when the flush completes. The
 * caller never sees the bio that is issued here.
 */
void
xfs_flush_bdev_async(
	struct bio		*bio,
	struct block_device	*bdev,
	struct completion	*done)
{
	struct request_queue	*q = bdev->bd_disk->queue;

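	/* No volatile write cache means there is nothing to flush. */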
	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		complete(done);
		return;
	}

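	/* The bio is caller-provided, so initialize it in place. */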
	bio_init(bio, NULL, 0);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
	bio->bi_private = done;
	bio->bi_end_io = xfs_flush_bdev_async_endio;

	submit_bio(bio);
}

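/*
 * A hedged usage sketch, not a caller from this file: the expected pattern
 * is to embed the bio and a completion in caller-owned state, start the
 * flush, and wait only when stability is actually required.  "bdev" below
 * stands in for whatever device the caller is flushing.
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	struct bio		bio;
 *
 *	xfs_flush_bdev_async(&bio, bdev, &done);
 *	...
 *	wait_for_completion(&done);
 */
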
int
xfs_rw_bdev(
	struct block_device	*bdev,
	sector_t		sector,
	unsigned int		count,
	char			*data,
	unsigned int		op)
{
	unsigned int		is_vmalloc = is_vmalloc_addr(data);
	unsigned int		left = count;
	int			error;
	struct bio		*bio;

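	/* Write back the vmalloc alias so the device reads current data. */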
	if (is_vmalloc && op == REQ_OP_WRITE)
		flush_kernel_vmap_range(data, count);

	bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = op | REQ_META | REQ_SYNC;

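	/*
	 * Add the buffer to the bio a page at a time; kmem_to_page() handles
	 * both vmalloc and directly mapped addresses.
	 */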
	do {
		struct page	*page = kmem_to_page(data);
		unsigned int	off = offset_in_page(data);
		unsigned int	len = min_t(unsigned, left, PAGE_SIZE - off);

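		/*
		 * If the bio is full, allocate a new one, chain the full bio
		 * to it and submit the full one.
		 */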
		while (bio_add_page(bio, page, len, off) != len) {
			struct bio	*prev = bio;

			bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));
			bio_copy_dev(bio, prev);
			bio->bi_iter.bi_sector = bio_end_sector(prev);
			bio->bi_opf = prev->bi_opf;
			bio_chain(prev, bio);

			submit_bio(prev);
		}

		data += len;
		left -= len;
	} while (left > 0);

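	/*
	 * Wait on the last bio; due to the chaining above it only completes
	 * once all previously submitted bios have finished.
	 */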
	error = submit_bio_wait(bio);
	bio_put(bio);

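	/*
	 * Invalidate the vmalloc alias so subsequent CPU reads see the data
	 * just DMAed into the pages.  Note that the loop above advanced @data
	 * to the end of the buffer, so step back to the start of the range.
	 */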
	if (is_vmalloc && op == REQ_OP_READ)
		invalidate_kernel_vmap_range(data - count, count);
	return error;
}
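
/*
 * A hedged usage sketch, not code from this file: synchronously read @count
 * bytes of metadata starting at @sector into a kmalloc'd or vmalloc'd buffer:
 *
 *	error = xfs_rw_bdev(bdev, sector, count, data, REQ_OP_READ);
 *	if (error)
 *		return error;
 */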