/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256
#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)

/*
 * upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT		(8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)		((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)	ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio)		do {				\
	WARN_ON(prio >= (1 << IOPRIO_BITS));				\
	(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);			\
	(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);	\
} while (0)

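/*
 * Illustrative sketch only (not part of the original header): how a caller
 * might stash and read back a priority with the helpers above, assuming
 * "prio" came from IOPRIO_PRIO_VALUE() and "trace_io_prio" is a hypothetical
 * consumer:
 *
 *	bio_set_prio(bio, prio);
 *	if (bio_prio_valid(bio))
 *		trace_io_prio(bio_prio(bio));
 */
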
/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define __bvec_iter_bvec(bvec, iter)	(&(bvec)[(iter).bi_idx])

#define bvec_iter_page(bvec, iter)				\
	(__bvec_iter_bvec((bvec), (iter))->bv_page)

#define bvec_iter_len(bvec, iter)				\
	min((iter).bi_size,					\
	    __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)

#define bvec_iter_offset(bvec, iter)				\
	(__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)

#define bvec_iter_bvec(bvec, iter)				\
((struct bio_vec) {						\
	.bv_page	= bvec_iter_page((bvec), (iter)),	\
	.bv_len		= bvec_iter_len((bvec), (iter)),	\
	.bv_offset	= bvec_iter_offset((bvec), (iter)),	\
})

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)				\
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
#define bio_sectors(bio)	((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors((bio)))

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    !(bio->bi_rw & REQ_DISCARD))
		return true;

	return false;
}

static inline bool bio_is_rw(struct bio *bio)
{
	if (!bio_has_data(bio))
		return false;

	if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
		return false;

	return true;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_rw & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, iter)				\
	(kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) +	\
		bio_iter_iovec((bio), (iter)).bv_offset)

#define __bio_kunmap_atomic(addr)	kunmap_atomic(addr)

/*
 * merge helpers etc
 */

/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, for eg virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, i)				\
	for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)

static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
				     unsigned bytes)
{
	WARN_ONCE(bytes > iter->bi_size,
		  "Attempted to advance past end of bvec iter\n");

	while (bytes) {
		unsigned len = min(bytes, bvec_iter_len(bv, *iter));

		bytes -= len;
		iter->bi_size -= len;
		iter->bi_bvec_done += len;

		if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
			iter->bi_bvec_done = 0;
			iter->bi_idx++;
		}
	}
}

#define for_each_bvec(bvl, bio_vec, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bvec_iter_bvec((bio_vec), (iter))), 1);		\
	     bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))


static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
				    unsigned bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

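/*
 * Illustrative sketch only (not part of the original header): walking a bio
 * segment by segment with bio_for_each_segment(). The bvec is returned by
 * value, with bv_len clamped to what the iterator still covers. "buf" is
 * assumed to be a caller-provided kernel buffer:
 *
 *	struct bio_vec bvec;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bvec, bio, iter) {
 *		void *p = kmap_atomic(bvec.bv_page);
 *
 *		memcpy(buf, p + bvec.bv_offset, bvec.bv_len);
 *		kunmap_atomic(p);
 *		buf += bvec.bv_len;
 *	}
 */
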
static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same, because they interpret bi_size
	 * differently:
	 */

	if (bio->bi_rw & REQ_DISCARD)
		return 1;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return 1;

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the bio could complete I/O before submit_bio
 * returns, and it would then already have been freed by the time the
 * if (bio->bi_flags ...) check runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb__before_atomic();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = bio_iovec(bio);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	if (!bio_flagged(bio, BIO_CLONED)) {
		*bv = bio->bi_io_vec[bio->bi_vcnt - 1];
		return;
	}

	if (unlikely(!bio_multiple_segments(bio))) {
		*bv = bio_iovec(bio);
		return;
	}

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[0];/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_rw & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}

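/*
 * Illustrative sketch only (not part of the original header): a driver that
 * can only handle "max_sectors" at a time might carve a bio up with
 * bio_next_split(). "max_sectors", "split_bioset" and "issue_one_chunk()"
 * are assumed to be provided by the caller:
 *
 *	do {
 *		split = bio_next_split(bio, max_sectors, GFP_NOIO, split_bioset);
 *		if (split != bio)
 *			bio_chain(split, bio);
 *		issue_one_chunk(split);
 *	} while (split != bio);
 */
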
extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(int pool_entries);

extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);

extern struct bio_set *fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}

static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, NULL);
}

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_error = -EIO;
	bio_endio(bio);
}

struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(int rw, struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    const struct iov_iter *, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

void generic_start_io_acct(int rw, unsigned long sectors,
			   struct hd_struct *part);
void generic_end_io_acct(int rw, struct hd_struct *part,
			 unsigned long start_time);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);

extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *,
				     const struct iov_iter *,
				     gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

#ifdef CONFIG_BLK_CGROUP
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
#else	/* CONFIG_BLK_CGROUP */
static inline int bio_associate_blkcg(struct bio *bio,
			struct cgroup_subsys_state *blkcg_css) { return 0; }
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif

static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
				   unsigned long *flags)
{
	return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_iter, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)

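/*
 * Illustrative sketch only (not part of the original header): bvec_kmap_irq()
 * and bvec_kunmap_irq() must stay paired, and interrupts must not be
 * re-enabled in between (see the comment above bvec_kmap_irq()). "data" is
 * assumed to be a caller-provided source buffer:
 *
 *	unsigned long flags;
 *	char *p = bvec_kmap_irq(bvec, &flags);
 *
 *	memcpy(p, data, bvec->bv_len);	// no spin_unlock_irq() etc. in here
 *	bvec_kunmap_irq(p, &flags);
 */
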
/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}

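/*
 * Illustrative sketch only (not part of the original header): deferring bios
 * on a bio_list and draining it later, e.g. from a worker thread. Resubmitting
 * via generic_make_request() is just one possible consumer:
 *
 *	struct bio_list deferred = BIO_EMPTY_LIST;
 *
 *	bio_list_add(&deferred, bio);		// queue for later
 *	...
 *	while ((bio = bio_list_pop(&deferred)))
 *		generic_make_request(bio);	// resubmit
 */
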
/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6
#define BIOVEC_MAX_IDX	(BIOVEC_NR_POOLS - 1)

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t *bio_pool;
	mempool_t *bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
	mempool_t *bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};

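/*
 * Illustrative sketch only (not part of the original header): a stacking
 * driver typically creates a private bio_set at init time and clones its bios
 * from it so it cannot be starved by other users of fs_bio_set:
 *
 *	bs = bioset_create(BIO_POOL_SIZE, 0);
 *	...
 *	clone = bio_clone_fast(bio, GFP_NOIO, bs);
 *	...
 *	bioset_free(bs);
 */
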
struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline bool bio_integrity_enabled(struct bio *bio)
{
	return false;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline int bio_integrity_prep(struct bio *bio)
{
	return 0;
}

static inline void bio_integrity_free(struct bio *bio)
{
	return;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
				      unsigned int sectors)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
					unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					 unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */