// SPDX-License-Identifier: GPL-2.0
/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>
#include "blk.h"

#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

static struct bio_set bounce_bio_set, bounce_bio_split;
static mempool_t page_pool, isa_page_pool;

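/*
 * Lazily set up the bio_sets used for bouncing: one for cloning bios
 * with their own bvec tables, one for splitting bios that are too big
 * to bounce in one go. Called from both pool initializers, so it
 * guards against running twice.
 */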
static void init_bounce_bioset(void)
{
	static bool bounce_bs_setup;
	int ret;

	if (bounce_bs_setup)
		return;

	ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	BUG_ON(ret);
	if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE))
		BUG_ON(1);

	ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0);
	BUG_ON(ret);
	bounce_bs_setup = true;
}

#if defined(CONFIG_HIGHMEM)
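/*
 * Set up the emergency page pool used for bouncing highmem pages.
 * Without memory hotplug this can be skipped entirely when no highmem
 * pages exist (max_pfn <= max_low_pfn).
 */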
static __init int init_emergency_pool(void)
{
	int ret;
#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
	if (max_pfn <= max_low_pfn)
		return 0;
#endif

	ret = mempool_init_page_pool(&page_pool, POOL_SIZE, 0);
	BUG_ON(ret);
	pr_info("pool size: %d pages\n", POOL_SIZE);

	init_bounce_bioset();
	return 0;
}

__initcall(init_emergency_pool);
#endif

#ifdef CONFIG_HIGHMEM
/*
 * highmem version: map the destination bvec and copy into it
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned char *vto;

	vto = kmap_atomic(to->bv_page);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto);
}

#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom)	\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

static DEFINE_MUTEX(isa_mutex);

/*
 * Called every time a queue is initialized with BLK_BOUNCE_ISA as the
 * max address, so check whether the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
	int ret;

	mutex_lock(&isa_mutex);

	if (mempool_initialized(&isa_page_pool)) {
		mutex_unlock(&isa_mutex);
		return 0;
	}

	ret = mempool_init(&isa_page_pool, ISA_POOL_SIZE, mempool_alloc_pages_isa,
			   mempool_free_pages, (void *) 0);
	BUG_ON(ret);

	pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
	init_bounce_bioset();
	mutex_unlock(&isa_mutex);
	return 0;
}

/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. Always
 * kmap it; that does the right thing either way.
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	unsigned char *vfrom;
	struct bio_vec tovec, fromvec;
	struct bvec_iter iter;
	/*
	 * The bio of @from is created by bounce, so we can iterate its
	 * bvecs from start to end, but @from->bi_iter can't be trusted
	 * because it might have been changed by splitting.
	 */
	struct bvec_iter from_iter = BVEC_ITER_ALL_INIT;

	bio_for_each_segment(tovec, to, iter) {
		fromvec = bio_iter_iovec(from, from_iter);
		if (tovec.bv_page != fromvec.bv_page) {
			/*
			 * fromvec.bv_offset and fromvec.bv_len might have
			 * been modified by the block layer, so use the
			 * original copy; bounce_copy_vec already uses
			 * tovec.bv_len.
			 */
			vfrom = page_address(fromvec.bv_page) +
				tovec.bv_offset;

			bounce_copy_vec(&tovec, vfrom);
			flush_dcache_page(tovec.bv_page);
		}
		bio_advance_iter(from, &from_iter, tovec.bv_len);
	}
}

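/*
 * Common completion handling: walk the original bio's bvecs in
 * lockstep with the clone's to find the substituted bounce pages,
 * release them to @pool, then propagate the status to the original
 * bio and complete it.
 */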
static void bounce_end_io(struct bio *bio, mempool_t *pool)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, orig_vec;
	struct bvec_iter orig_iter = bio_orig->bi_iter;
	struct bvec_iter_all iter_all;

	/*
	 * free up the bounce pages that were substituted in
	 */
	bio_for_each_segment_all(bvec, bio, iter_all) {
		orig_vec = bio_iter_iovec(bio_orig, orig_iter);
		if (bvec->bv_page != orig_vec.bv_page) {
			dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
			mempool_free(bvec->bv_page, pool);
		}
		bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len);
	}

	bio_orig->bi_status = bio->bi_status;
	bio_endio(bio_orig);
	bio_put(bio);
}

static void bounce_end_io_write(struct bio *bio)
{
	bounce_end_io(bio, &page_pool);
}

static void bounce_end_io_write_isa(struct bio *bio)
{
	bounce_end_io(bio, &isa_page_pool);
}

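/*
 * Read completion: the device DMA'd into the bounce pages, so on
 * success copy the data back up into the original (possibly highmem)
 * pages before releasing the bounce pages.
 */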
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
{
	struct bio *bio_orig = bio->bi_private;

	if (!bio->bi_status)
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool);
}

static void bounce_end_io_read(struct bio *bio)
{
	__bounce_end_io_read(bio, &page_pool);
}

static void bounce_end_io_read_isa(struct bio *bio)
{
	__bounce_end_io_read(bio, &isa_page_pool);
}

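/*
 * Clone @bio_src into a bio whose bvec table we own and may rewrite to
 * point at bounce pages; see the comment below for why this can't
 * simply share the source's bvecs like __bio_clone_fast() does.
 */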
static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
		struct bio_set *bs)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	/*
	 * Before immutable biovecs, __bio_clone() used to just do a memcpy from
	 * bio_src->bi_io_vec to bio->bi_io_vec.
	 *
	 * We can't do that anymore, because:
	 *
	 *  - The point of cloning the biovec is to produce a bio with a biovec
	 *    the caller can modify: bi_idx and bi_bvec_done should be 0.
	 *
	 *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
	 *    we tried to clone the whole thing bio_alloc_bioset() would fail.
	 *    But the clone should succeed as long as the number of biovecs we
	 *    actually need to allocate is fewer than BIO_MAX_PAGES.
	 *
	 *  - Lastly, bi_vcnt should not be looked at or relied upon by code
	 *    that does not own the bio - reason being drivers don't use it for
	 *    iterating over the biovec anymore, so expecting it to be kept up
	 *    to date (i.e. for clones that share the parent biovec) is just
	 *    asking for trouble and would force extra work on
	 *    __bio_clone_fast() anyways.
	 */

	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
	if (!bio)
		return NULL;
	bio->bi_disk		= bio_src->bi_disk;
	bio->bi_opf		= bio_src->bi_opf;
	bio->bi_ioprio		= bio_src->bi_ioprio;
	bio->bi_write_hint	= bio_src->bi_write_hint;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		break;
	case REQ_OP_WRITE_SAME:
		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
		break;
	default:
		bio_for_each_segment(bv, bio_src, iter)
			bio->bi_io_vec[bio->bi_vcnt++] = bv;
		break;
	}

	if (bio_crypt_clone(bio, bio_src, gfp_mask) < 0)
		goto err_put;

	if (bio_integrity(bio_src) &&
	    bio_integrity_clone(bio, bio_src, gfp_mask) < 0)
		goto err_put;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);

	return bio;

err_put:
	bio_put(bio);
	return NULL;
}

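/*
 * The core of the bounce path: clone the bio (splitting it first if
 * need be), swap every page above the queue's bounce_pfn for a page
 * from @pool, copy the data down for writes, and send the clone on in
 * place of the original. The completion handlers copy reads back and
 * free the bounce pages.
 */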
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
		mempool_t *pool)
{
	struct bio *bio;
	int rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, from;
	struct bvec_iter iter;
	unsigned i = 0;
	bool bounce = false;
	int sectors = 0;
	bool passthrough = bio_is_passthrough(*bio_orig);

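	/*
	 * Scan the bio first: count the sectors covered by the first
	 * BIO_MAX_PAGES segments (in case we have to split below), and
	 * note whether any page actually sits above the bounce limit.
	 */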
	bio_for_each_segment(from, *bio_orig, iter) {
		if (i++ < BIO_MAX_PAGES)
			sectors += from.bv_len >> 9;
		if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
			bounce = true;
	}
	if (!bounce)
		return;

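	/*
	 * The clone below can hold at most BIO_MAX_PAGES bvecs, so split
	 * oversized bios first: bounce the front part here and resubmit
	 * the chained remainder, which gets bounced on its own pass.
	 */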
	if (!passthrough && sectors < bio_sectors(*bio_orig)) {
		bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
		bio_chain(bio, *bio_orig);
		submit_bio_noacct(*bio_orig);
		*bio_orig = bio;
	}
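	/*
	 * Clone the bio so its bvec table can be rewritten below.
	 * Passthrough bios are cloned outside the bounce bio_set.
	 */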
	bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL :
			&bounce_bio_set);

	/*
	 * The bvec table can't be updated by bio_for_each_segment_all(),
	 * so walk the table directly. This is safe because each bvec in
	 * 'bio' is a single-page bvec.
	 */
	for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) {
		struct page *page = to->bv_page;

		if (page_to_pfn(page) <= q->limits.bounce_pfn)
			continue;

		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
		inc_zone_page_state(to->bv_page, NR_BOUNCE);

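		/*
		 * For writes the device will read from the bounce page,
		 * so copy the payload down from the original page now.
		 */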
		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(page);

			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap_atomic(page) + to->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap_atomic(vfrom);
		}
	}

	trace_block_bio_bounce(q, *bio_orig);

	bio->bi_flags |= (1 << BIO_BOUNCED);

	if (pool == &page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}

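/*
 * blk_queue_bounce - bounce the pages of a bio that the device can't
 * reach. Drivers opt in by setting a bounce limit on their queue,
 * e.g. blk_queue_bounce_limit(q, BLK_BOUNCE_ISA) for hardware that can
 * only DMA to ISA-addressable memory; *bio_orig may then be replaced
 * here by the bounced clone.
 */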
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	mempool_t *pool;

	/*
	 * Data-less bio, nothing to bounce
	 */
	if (!bio_has_data(*bio_orig))
		return;

	/*
	 * For the non-ISA bounce case, just check if the bounce pfn is
	 * equal to or bigger than the highest pfn in the system -- in
	 * that case, don't waste time iterating over bio segments.
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (q->limits.bounce_pfn >= blk_max_pfn)
			return;
		pool = &page_pool;
	} else {
		BUG_ON(!mempool_initialized(&isa_page_pool));
		pool = &isa_page_pool;
	}

	/*
	 * slow path
	 */
	__blk_queue_bounce(q, bio_orig, pool);
}