// SPDX-License-Identifier: GPL-2.0
/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>
#include "blk.h"

#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

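/*
 * Bounce clones are allocated from these bio_sets; page_pool provides
 * the lowmem pages that highmem data is bounced into.
 */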
static struct bio_set bounce_bio_set, bounce_bio_split;
static mempool_t page_pool;

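/*
 * Set up the bio_sets used for bounce clones and splits.  Guarded by
 * bounce_bs_setup so that only the first call does the work.
 */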
static void init_bounce_bioset(void)
{
	static bool bounce_bs_setup;
	int ret;

	if (bounce_bs_setup)
		return;

	ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	BUG_ON(ret);
	if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE))
		BUG_ON(1);

	ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0);
	BUG_ON(ret);
	bounce_bs_setup = true;
}

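/*
 * Boot-time setup: the mempool guarantees that bounce page allocations
 * make forward progress even under memory pressure.  Without memory
 * hotplug, the pool is skipped entirely when no highmem exists
 * (max_pfn <= max_low_pfn).
 */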
static __init int init_emergency_pool(void)
{
	int ret;

#ifndef CONFIG_MEMORY_HOTPLUG
	if (max_pfn <= max_low_pfn)
		return 0;
#endif

	ret = mempool_init_page_pool(&page_pool, POOL_SIZE, 0);
	BUG_ON(ret);
	pr_info("pool size: %d pages\n", POOL_SIZE);

	init_bounce_bioset();
	return 0;
}

__initcall(init_emergency_pool);

/*
 * Simple bounce buffer support for highmem pages.  Depending on the
 * queue gfp mask set, *to may or may not be a highmem page; the copy
 * helper always maps the destination page, so either way it does the
 * right thing.
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	struct bio_vec tovec, fromvec;
	struct bvec_iter iter;
	/*
	 * The bio of @from is created by bounce, so we can iterate
	 * its bvec from start to end, but the @from->bi_iter can't be
	 * trusted because it might be changed by splitting.
	 */
	struct bvec_iter from_iter = BVEC_ITER_ALL_INIT;

	bio_for_each_segment(tovec, to, iter) {
		fromvec = bio_iter_iovec(from, from_iter);
		if (tovec.bv_page != fromvec.bv_page) {
			/*
			 * fromvec->bv_offset and fromvec->bv_len might have
			 * been modified by the block layer, so copy from the
			 * original vector; memcpy_to_bvec() already uses
			 * tovec.bv_len as the copy length.
			 */
			memcpy_to_bvec(&tovec, page_address(fromvec.bv_page) +
				       tovec.bv_offset);
		}
		bio_advance_iter(from, &from_iter, tovec.bv_len);
	}
}

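/*
 * Common completion path: free each page that was bounced (i.e. differs
 * from the corresponding page of the original bio), propagate the I/O
 * status, and complete the original bio.
 */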
static void bounce_end_io(struct bio *bio)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, orig_vec;
	struct bvec_iter orig_iter = bio_orig->bi_iter;
	struct bvec_iter_all iter_all;

	/*
	 * Free the bounce pages that were substituted for the originals.
	 */
	bio_for_each_segment_all(bvec, bio, iter_all) {
		orig_vec = bio_iter_iovec(bio_orig, orig_iter);
		if (bvec->bv_page != orig_vec.bv_page) {
			dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
			mempool_free(bvec->bv_page, &page_pool);
		}
		bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len);
	}

	bio_orig->bi_status = bio->bi_status;
	bio_endio(bio_orig);
	bio_put(bio);
}

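/*
 * Writes need no copy-back: the data was copied into the bounce pages
 * before submission, in __blk_queue_bounce().
 */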
static void bounce_end_io_write(struct bio *bio)
{
	bounce_end_io(bio);
}

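/*
 * Reads must copy the received data from the bounce pages back into the
 * original highmem pages before the original bio is completed.
 */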
static void bounce_end_io_read(struct bio *bio)
{
	struct bio *bio_orig = bio->bi_private;

	if (!bio->bi_status)
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio);
}

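/*
 * Clone @bio_src into a bio allocated from bounce_bio_set, so that its
 * bvecs can later be rewritten to point at bounce pages.
 */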
static struct bio *bounce_clone_bio(struct bio *bio_src)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	/*
	 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
	 * bio_src->bi_io_vec to bio->bi_io_vec.
	 *
	 * We can't do that anymore, because:
	 *
	 *  - The point of cloning the biovec is to produce a bio with a biovec
	 *    the caller can modify: bi_idx and bi_bvec_done should be 0.
	 *
	 *  - The original bio could've had more than BIO_MAX_VECS biovecs; if
	 *    we tried to clone the whole thing bio_alloc_bioset() would fail.
	 *    But the clone should succeed as long as the number of biovecs we
	 *    actually need to allocate is fewer than BIO_MAX_VECS.
	 *
	 *  - Lastly, bi_vcnt should not be looked at or relied upon by code
	 *    that does not own the bio - reason being drivers don't use it for
	 *    iterating over the biovec anymore, so expecting it to be kept up
	 *    to date (i.e. for clones that share the parent biovec) is just
	 *    asking for trouble and would force extra work on
	 *    __bio_clone_fast() anyways.
	 */
	bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src),
			       &bounce_bio_set);
	bio->bi_bdev		= bio_src->bi_bdev;
	if (bio_flagged(bio_src, BIO_REMAPPED))
		bio_set_flag(bio, BIO_REMAPPED);
	bio->bi_opf		= bio_src->bi_opf;
	bio->bi_ioprio		= bio_src->bi_ioprio;
	bio->bi_write_hint	= bio_src->bi_write_hint;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		break;
	case REQ_OP_WRITE_SAME:
		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
		break;
	default:
		bio_for_each_segment(bv, bio_src, iter)
			bio->bi_io_vec[bio->bi_vcnt++] = bv;
		break;
	}

	if (bio_crypt_clone(bio, bio_src, GFP_NOIO) < 0)
		goto err_put;

	if (bio_integrity(bio_src) &&
	    bio_integrity_clone(bio, bio_src, GFP_NOIO) < 0)
		goto err_put;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);

	return bio;

err_put:
	bio_put(bio);
	return NULL;
}

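/*
 * Entry point: if @bio_orig contains highmem pages, replace it with a
 * bounce clone whose highmem pages have been substituted with lowmem
 * pages from page_pool.  Completion is redirected through the bounce
 * end_io handlers so the pages can be freed (and, for reads, copied
 * back) before the original bio completes.
 */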
void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	struct bio *bio;
	int rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, from;
	struct bvec_iter iter;
	unsigned i = 0;
	bool bounce = false;
	int sectors = 0;

	bio_for_each_segment(from, *bio_orig, iter) {
		if (i++ < BIO_MAX_VECS)
			sectors += from.bv_len >> 9;
		if (PageHighMem(from.bv_page))
			bounce = true;
	}
	if (!bounce)
		return;

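	/*
	 * A bounce clone holds at most BIO_MAX_VECS segments, and "sectors"
	 * above only counted the first BIO_MAX_VECS segments.  Split off
	 * the part that fits and resubmit the remainder, which will be
	 * bounced on its own trip through the stack.
	 */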
	if (sectors < bio_sectors(*bio_orig)) {
		bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
		bio_chain(bio, *bio_orig);
		submit_bio_noacct(*bio_orig);
		*bio_orig = bio;
	}
	bio = bounce_clone_bio(*bio_orig);

	/*
	 * Bvec table can't be updated by bio_for_each_segment_all(),
	 * so retrieve bvec from the table directly. This way is safe
	 * because the 'bio' is single-page bvec.
	 */
	for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) {
		struct page *page = to->bv_page;

		if (!PageHighMem(page))
			continue;

		to->bv_page = mempool_alloc(&page_pool, GFP_NOIO);
		inc_zone_page_state(to->bv_page, NR_BOUNCE);

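		/*
		 * For a write the bounce page must carry the payload before
		 * submission, since the device only sees the bounce page.
		 * kmap_atomic() makes the highmem source addressable;
		 * flush_dcache_page() handles D-cache aliasing where the
		 * architecture requires it.
		 */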
		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(page);

			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap_atomic(page) + to->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap_atomic(vfrom);
		}
	}

	trace_block_bio_bounce(*bio_orig);

	bio->bi_flags |= (1 << BIO_BOUNCED);

	if (rw == READ)
		bio->bi_end_io = bounce_end_io_read;
	else
		bio->bi_end_io = bounce_end_io_write;

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}