/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#ifndef BTRFS_VOLUMES_H
#define BTRFS_VOLUMES_H

#include <linux/bio.h>
#include <linux/sort.h>
#include <linux/btrfs.h>
#include "async-thread.h"

#define BTRFS_MAX_DATA_CHUNK_SIZE	(10ULL * SZ_1G)

extern struct mutex uuid_mutex;

#define BTRFS_STRIPE_LEN	SZ_64K

struct btrfs_io_geometry {
	/* remaining bytes before crossing a stripe */
	u64 len;
	/* offset of logical address in chunk */
	u64 offset;
	/* length of single IO stripe */
	u64 stripe_len;
	/* number of stripe where address falls */
	u64 stripe_nr;
	/* offset of address in stripe */
	u64 stripe_offset;
	/* offset of raid56 stripe into the chunk */
	u64 raid56_stripe_offset;
};
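
/*
 * Illustrative only (not part of the original header): how the fields above
 * relate for a simple striped profile, assuming the default 64K stripe and a
 * hypothetical logical address. The real computation lives in
 * btrfs_get_io_geometry().
 *
 *	u64 offset = logical - chunk_start;
 *	u64 stripe_nr = offset / BTRFS_STRIPE_LEN;	// e.g. 200K / 64K = 3
 *	u64 stripe_offset = offset - stripe_nr * BTRFS_STRIPE_LEN;	// 8K
 *	u64 len = min(io_len, BTRFS_STRIPE_LEN - stripe_offset);	// <= 56K
 */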

/*
 * Use sequence counter to get consistent device stat data on
 * 32-bit processors.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#include <linux/seqlock.h>
#define __BTRFS_NEED_DEVICE_DATA_ORDERED
#define btrfs_device_data_ordered_init(device)	\
	seqcount_init(&device->data_seqcount)
#else
#define btrfs_device_data_ordered_init(device) do { } while (0)
#endif

#define BTRFS_DEV_STATE_WRITEABLE	(0)
#define BTRFS_DEV_STATE_IN_FS_METADATA	(1)
#define BTRFS_DEV_STATE_MISSING		(2)
#define BTRFS_DEV_STATE_REPLACE_TGT	(3)
#define BTRFS_DEV_STATE_FLUSH_SENT	(4)
#define BTRFS_DEV_STATE_NO_READA	(5)

struct btrfs_zoned_device_info;

struct btrfs_device {
	struct list_head dev_list; /* device_list_mutex */
	struct list_head dev_alloc_list; /* chunk mutex */
	struct list_head post_commit_list; /* chunk mutex */
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_fs_info *fs_info;

	struct rcu_string __rcu *name;

	u64 generation;

	struct block_device *bdev;

	struct btrfs_zoned_device_info *zone_info;

	/* the mode sent to blkdev_get */
	fmode_t mode;

	unsigned long dev_state;
	blk_status_t last_flush_error;

#ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
	seqcount_t data_seqcount;
#endif

	/* the internal btrfs device id */
	u64 devid;

	/* size of the device in memory */
	u64 total_bytes;

	/* size of the device on disk */
	u64 disk_total_bytes;

	/* bytes used */
	u64 bytes_used;

	/* optimal io alignment for this device */
	u32 io_align;

	/* optimal io width for this device */
	u32 io_width;
	/* type and info about this device */
	u64 type;

	/* minimal io size for this device */
	u32 sector_size;

	/* physical drive uuid (or lvm uuid) */
	u8 uuid[BTRFS_UUID_SIZE];

	/*
	 * size of the device on the current transaction
	 *
	 * This variant is updated when committing the transaction,
	 * and is protected by the chunk mutex
	 */
	u64 commit_total_bytes;

	/* bytes used on the current transaction */
	u64 commit_bytes_used;

	/* for sending down flush barriers */
	struct bio *flush_bio;
	struct completion flush_wait;

	/* per-device scrub information */
	struct scrub_ctx *scrub_ctx;

	/*
	 * disk I/O failure stats. For detailed description refer to
	 * enum btrfs_dev_stat_values in ioctl.h
	 */
	int dev_stats_valid;

	/* Counter to record the change of device stats */
	atomic_t dev_stats_ccnt;
	atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];

	struct extent_io_tree alloc_state;

	struct completion kobj_unregister;
	/* For sysfs/FSID/devinfo/devid/ */
	struct kobject devid_kobj;

	/* Bandwidth limit for scrub, in bytes */
	u64 scrub_speed_max;
};

/*
 * If we read those fields while holding the lock that protects them, we
 * don't need the following helpers; reading them directly is safe.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)			\
{									\
	u64 size;							\
	unsigned int seq;						\
									\
	do {								\
		seq = read_seqcount_begin(&dev->data_seqcount);		\
		size = dev->name;					\
	} while (read_seqcount_retry(&dev->data_seqcount, seq));	\
	return size;							\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	preempt_disable();						\
	write_seqcount_begin(&dev->data_seqcount);			\
	dev->name = size;						\
	write_seqcount_end(&dev->data_seqcount);			\
	preempt_enable();						\
}
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)			\
{									\
	u64 size;							\
									\
	preempt_disable();						\
	size = dev->name;						\
	preempt_enable();						\
	return size;							\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	preempt_disable();						\
	dev->name = size;						\
	preempt_enable();						\
}
#else
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)			\
{									\
	return dev->name;						\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	dev->name = size;						\
}
#endif

BTRFS_DEVICE_GETSET_FUNCS(total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(disk_total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(bytes_used);
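
/*
 * Illustrative sketch (not in the original header): why the generated
 * helpers matter. On 32-bit SMP a u64 store is two 32-bit stores, so a
 * concurrent reader could observe a torn value; the seqcount retry loop
 * above prevents that. Typical hypothetical usage:
 *
 *	u64 total = btrfs_device_get_total_bytes(dev);	// torn-read safe
 *	btrfs_device_set_total_bytes(dev, new_size);	// writer side
 *
 * On 64-bit, the same calls compile down to plain loads and stores.
 */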

enum btrfs_chunk_allocation_policy {
	BTRFS_CHUNK_ALLOC_REGULAR,
	BTRFS_CHUNK_ALLOC_ZONED,
};

/*
 * Read policies for mirrored block group profiles. The read path picks the
 * stripe to read from based on these policies.
 */
enum btrfs_read_policy {
	/* Use process PID to choose the stripe */
	BTRFS_READ_POLICY_PID,
	BTRFS_NR_READ_POLICY,
};
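
/*
 * Illustrative only (assumed shape; the real selection is done in the chunk
 * mapping code): the PID policy spreads readers across mirrors roughly as
 *
 *	stripe_index = current->pid % num_mirrors;
 *
 * so tasks with different PIDs tend to read from different copies.
 */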

struct btrfs_fs_devices {
	u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
	u8 metadata_uuid[BTRFS_FSID_SIZE];
	bool fsid_change;
	struct list_head fs_list;

	/*
	 * Number of devices under this fsid, including missing and
	 * replace-target devices and excluding seed devices.
	 */
	u64 num_devices;

	/*
	 * The number of devices that were successfully opened, including the
	 * replace-target and excluding seed devices.
	 */
	u64 open_devices;

	/* The number of devices that are under the chunk allocation list. */
	u64 rw_devices;

	/* Count of missing devices under this fsid, excluding seed devices. */
	u64 missing_devices;
	u64 total_rw_bytes;

	/*
	 * Count of devices from btrfs_super_block::num_devices for this fsid,
	 * which includes the seed device and excludes the transient
	 * replace-target device.
	 */
	u64 total_devices;

	/* Highest generation number of seen devices */
	u64 latest_generation;

	/*
	 * The mount device, or the device with the highest generation after
	 * removal or replace.
	 */
	struct btrfs_device *latest_dev;

	/*
	 * All of the devices in the FS, protected by a mutex so we can
	 * safely walk it to write out the supers without worrying about
	 * adds/removes by the multi-device code. Scrubbing the super block
	 * can kick off super writes while holding this mutex.
	 */
	struct mutex device_list_mutex;

	/* List of all devices, protected by device_list_mutex */
	struct list_head devices;

	/*
	 * Devices which can satisfy space allocation. Protected by
	 * chunk_mutex
	 */
	struct list_head alloc_list;

	struct list_head seed_list;
	bool seeding;

	int opened;

	/*
	 * set when we find or add a device that doesn't have the
	 * nonrot flag set
	 */
	bool rotating;

	struct btrfs_fs_info *fs_info;
	/* sysfs kobjects */
	struct kobject fsid_kobj;
	struct kobject *devices_kobj;
	struct kobject *devinfo_kobj;
	struct completion kobj_unregister;

	enum btrfs_chunk_allocation_policy chunk_alloc_policy;

	/* Policy used to read the mirrored stripes */
	enum btrfs_read_policy read_policy;
};

#define BTRFS_BIO_INLINE_CSUM_SIZE	64

#define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info)	\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
				- 2 * sizeof(struct btrfs_disk_key)	\
				- 2 * sizeof(struct btrfs_chunk))	\
				/ sizeof(struct btrfs_stripe) + 1)
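
/*
 * Worked example (illustrative, assuming the usual on-disk sizes:
 * BTRFS_SYSTEM_CHUNK_ARRAY_SIZE = 2048, btrfs_disk_key = 17 bytes,
 * btrfs_chunk = 80 bytes with one embedded stripe, btrfs_stripe = 32 bytes):
 *
 *	BTRFS_MAX_DEVS_SYS_CHUNK = (2048 - 2*17 - 2*80) / 32 + 1
 *	                         = 1854 / 32 + 1 = 57 + 1 = 58
 *
 * The "+ 1" accounts for the stripe already embedded in struct btrfs_chunk.
 */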

/*
 * Additional info to pass along with a bio.
 *
 * Mostly for btrfs specific features like csum and mirror_num.
 */
struct btrfs_bio {
	unsigned int mirror_num;

	/* @device is for stripe IO submission. */
	struct btrfs_device *device;
	u8 *csum;
	u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
	struct bvec_iter iter;

	/*
	 * This member must come last, bio_alloc_bioset will allocate enough
	 * bytes for the entire btrfs_bio but relies on bio being last.
	 */
	struct bio bio;
};

static inline struct btrfs_bio *btrfs_bio(struct bio *bio)
{
	return container_of(bio, struct btrfs_bio, bio);
}
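
/*
 * Illustrative usage (not part of the original header): code that only has
 * the plain bio, e.g. an endio handler, can recover the containing
 * btrfs_bio via container_of():
 *
 *	static void hypothetical_end_io(struct bio *bio)
 *	{
 *		struct btrfs_bio *bbio = btrfs_bio(bio);
 *
 *		// bbio->mirror_num, bbio->csum etc. are now accessible
 *	}
 */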

static inline void btrfs_bio_free_csum(struct btrfs_bio *bbio)
{
	if (bbio->csum != bbio->csum_inline) {
		kfree(bbio->csum);
		bbio->csum = NULL;
	}
}

struct btrfs_io_stripe {
	struct btrfs_device *dev;
	u64 physical;
	u64 length; /* only used for discard mappings */
};

/*
 * Context for IO submission for a device stripe.
 *
 * - Track the unfinished mirrors for mirror based profiles
 *   Mirror based profiles are SINGLE/DUP/RAID1/RAID10.
 *
 * - Contain the logical -> physical mapping info
 *   Used by submit_stripe_bio() for mapping logical bio
 *   into physical device address.
 *
 * - Contain device replace info
 *   Used by handle_ops_on_dev_replace() to copy logical bios
 *   into the new device.
 *
 * - Contain RAID56 full stripe logical bytenrs
 */
struct btrfs_io_context {
	refcount_t refs;
	atomic_t stripes_pending;
	struct btrfs_fs_info *fs_info;
	u64 map_type; /* get from map_lookup->type */
	bio_end_io_t *end_io;
	struct bio *orig_bio;
	void *private;
	atomic_t error;
	int max_errors;
	int num_stripes;
	int mirror_num;
	int num_tgtdevs;
	int *tgtdev_map;
	/*
	 * logical block numbers for the start of each stripe
	 * The last one or two are p/q. These are sorted,
	 * so raid_map[0] is the start of our full stripe
	 */
	u64 *raid_map;
	struct btrfs_io_stripe stripes[];
};
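
/*
 * Illustrative lifecycle sketch (assumed shape, not from the original
 * header): btrfs_map_block(), declared below, allocates and fills a
 * btrfs_io_context for a logical range; callers drop their reference when
 * done:
 *
 *	struct btrfs_io_context *bioc = NULL;
 *	u64 length = fs_info->sectorsize;
 *	int ret;
 *
 *	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical, &length,
 *			      &bioc, 0);
 *	if (!ret) {
 *		// bioc->stripes[0].dev / .physical describe the first copy
 *		btrfs_put_bioc(bioc);
 *	}
 */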

struct btrfs_device_info {
	struct btrfs_device *dev;
	u64 dev_offset;
	u64 max_avail;
	u64 total_avail;
};

struct btrfs_raid_attr {
	u8 sub_stripes;		/* sub_stripes info for map */
	u8 dev_stripes;		/* stripes per dev */
	u8 devs_max;		/* max devs to use */
	u8 devs_min;		/* min devs needed */
	u8 tolerated_failures;	/* max tolerated fail devs */
	u8 devs_increment;	/* ndevs has to be a multiple of this */
	u8 ncopies;		/* how many copies of the data there are */
	u8 nparity;		/* number of stripes worth of bytes to store
				 * parity information */
	u8 mindev_error;	/* error code if min devs requirement is unmet */
	const char raid_name[8]; /* name of the raid */
	u64 bg_flag;		/* block group flag of the raid */
};

extern const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES];

struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	u64 stripe_len;
	int num_stripes;
	int sub_stripes;
	int verified_stripes; /* For mount time dev extent verification */
	struct btrfs_io_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_io_stripe) * (n)))

struct btrfs_balance_args;
struct btrfs_balance_progress;
struct btrfs_balance_control {
	struct btrfs_balance_args data;
	struct btrfs_balance_args meta;
	struct btrfs_balance_args sys;

	u64 flags;

	struct btrfs_balance_progress stat;
};

/*
 * Search for a given device by the set parameters
 */
struct btrfs_dev_lookup_args {
	u64 devid;
	u8 *uuid;
	u8 *fsid;
	bool missing;
};

/* We have to initialize to -1 because BTRFS_DEV_REPLACE_DEVID is 0 */
#define BTRFS_DEV_LOOKUP_ARGS_INIT { .devid = (u64)-1 }

#define BTRFS_DEV_LOOKUP_ARGS(name) \
	struct btrfs_dev_lookup_args name = BTRFS_DEV_LOOKUP_ARGS_INIT
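
/*
 * Illustrative usage (hypothetical, not from the original header): declare
 * pre-initialized lookup args, set the field(s) to match on, and hand them
 * to btrfs_find_device(), declared below:
 *
 *	BTRFS_DEV_LOOKUP_ARGS(args);
 *
 *	args.devid = 1;
 *	device = btrfs_find_device(fs_info->fs_devices, &args);
 *	if (!device)
 *		// no such device
 */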

enum btrfs_map_op {
	BTRFS_MAP_READ,
	BTRFS_MAP_WRITE,
	BTRFS_MAP_DISCARD,
	BTRFS_MAP_GET_READ_MIRRORS,
};

static inline enum btrfs_map_op btrfs_op(struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		return BTRFS_MAP_DISCARD;
	case REQ_OP_WRITE:
	case REQ_OP_ZONE_APPEND:
		return BTRFS_MAP_WRITE;
	default:
		WARN_ON_ONCE(1);
		fallthrough;
	case REQ_OP_READ:
		return BTRFS_MAP_READ;
	}
}

void btrfs_get_bioc(struct btrfs_io_context *bioc);
void btrfs_put_bioc(struct btrfs_io_context *bioc);
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_io_context **bioc_ret, int mirror_num);
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_io_context **bioc_ret);
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *map,
			  enum btrfs_map_op op, u64 logical,
			  struct btrfs_io_geometry *io_geom);
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
					     u64 type);
void btrfs_mapping_tree_free(struct extent_map_tree *tree);
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
			   int mirror_num);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder);
struct btrfs_device *btrfs_scan_one_device(const char *path,
					   fmode_t flags, void *holder);
int btrfs_forget_devices(const char *path);
void btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices);
void btrfs_assign_next_active_device(struct btrfs_device *device,
				     struct btrfs_device *this_dev);
struct btrfs_device *btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info,
						  u64 devid,
						  const char *devpath);
int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
				 struct btrfs_dev_lookup_args *args,
				 const char *path);
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid);
void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args);
void btrfs_free_device(struct btrfs_device *device);
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
		    struct btrfs_dev_lookup_args *args,
		    struct block_device **bdev, fmode_t *mode);
void __exit btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size);
struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices,
				       const struct btrfs_dev_lookup_args *args);
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path);
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs);
void btrfs_describe_block_groups(u64 flags, char *buf, u32 size_buf);
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset);
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_uuid_scan_kthread(void *data);
bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset);
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *max_avail);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats);
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans);
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev);
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev);
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev);
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
			   u64 logical, u64 len);
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical);
int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
				     struct btrfs_block_group *bg);
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
				       u64 logical, u64 length);
void btrfs_release_disk_super(struct btrfs_super_block *super);

static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
				      int index)
{
	atomic_inc(dev->dev_stat_values + index);
	/*
	 * This memory barrier orders stores updating statistics before stores
	 * updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}

static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
				      int index)
{
	return atomic_read(dev->dev_stat_values + index);
}

static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
						int index)
{
	int ret;

	ret = atomic_xchg(dev->dev_stat_values + index, 0);
	/*
	 * atomic_xchg implies a full memory barrier as per atomic_t.txt:
	 * - RMW operations that have a return value are fully ordered;
	 *
	 * This implicit memory barrier is paired with the smp_rmb in
	 * btrfs_run_dev_stats().
	 */
	atomic_inc(&dev->dev_stats_ccnt);
	return ret;
}

static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
				      int index, unsigned long val)
{
	atomic_set(dev->dev_stat_values + index, val);
	/*
	 * This memory barrier orders stores updating statistics before stores
	 * updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}
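
/*
 * Illustrative reader-side ordering sketch (assumed shape; the real consumer
 * is btrfs_run_dev_stats() in volumes.c): a reader snapshots dev_stats_ccnt
 * first and issues smp_rmb(), pairing with smp_mb__before_atomic() in the
 * helpers above, so if it observes an incremented counter it also observes
 * the stat update that preceded it:
 *
 *	int ccnt = atomic_read(&dev->dev_stats_ccnt);
 *
 *	smp_rmb();
 *	value = btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS);
 */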

void btrfs_commit_device_sizes(struct btrfs_transaction *trans);

struct list_head * __attribute_const__ btrfs_get_fs_uuids(void);
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
			       struct btrfs_device *failing_dev);
void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
			       struct block_device *bdev,
			       const char *device_path);

enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags);
int btrfs_bg_type_to_factor(u64 flags);
const char *btrfs_bg_type_to_raid_name(u64 flags);
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);
bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical);

#endif