// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"
#include "dev-replace.h"

#undef DEBUG

/*
 * This is the implementation for the generic read ahead framework.
 *
 * To trigger a readahead, btrfs_reada_add must be called. It will start
 * a read ahead for the given range [start, end) on tree root. The returned
 * handle can either be used to wait on the readahead to finish
 * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 *
 * The read ahead works as follows:
 * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 * reada_start_machine will then search for extents to prefetch and trigger
 * some reads. When a read finishes for a node, all contained node/leaf
 * pointers that lie in the given range will also be enqueued. The reads will
 * be triggered in sequential order, thus giving a big win over a naive
 * enumeration. It will also make use of multi-device layouts. Each disk
 * will have its own read pointer and all disks will be utilized in parallel.
 * Also, no two disks will read both sides of a mirror simultaneously, as this
 * would waste seeking capacity. Instead both disks will read different parts
 * of the filesystem.
 * Any number of readaheads can be started in parallel. The read order will be
 * determined globally, i.e. 2 parallel readaheads will normally finish faster
 * than the same 2 started one after another.
 */
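
/*
 * Example usage (an illustrative sketch only, not taken from an existing
 * caller; the root pointer and the choice to cover the whole key space are
 * assumptions made for this example):
 *
 *      struct reada_control *rc;
 *      struct btrfs_key key_start = { 0 };
 *      struct btrfs_key key_end = {
 *              .objectid = (u64)-1,
 *              .type = (u8)-1,
 *              .offset = (u64)-1,
 *      };
 *
 *      rc = btrfs_reada_add(root, &key_start, &key_end);
 *      if (!IS_ERR(rc))
 *              btrfs_reada_wait(rc);
 *
 * btrfs_reada_detach(rc) can be called instead of btrfs_reada_wait(rc) to
 * let the readahead finish in the background.
 */
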
#define MAX_IN_FLIGHT 6

struct reada_extctl {
        struct list_head list;
        struct reada_control *rc;
        u64 generation;
};

struct reada_extent {
        u64 logical;
        struct btrfs_key top;
        struct list_head extctl;
        int refcnt;
        spinlock_t lock;
        struct reada_zone *zones[BTRFS_MAX_MIRRORS];
        int nzones;
        int scheduled;
};

struct reada_zone {
        u64 start;
        u64 end;
        u64 elems;
        struct list_head list;
        spinlock_t lock;
        int locked;
        struct btrfs_device *device;
        struct btrfs_device *devs[BTRFS_MAX_MIRRORS]; /* full list, incl
                                                       * self */
        int ndevs;
        struct kref refcnt;
};

struct reada_machine_work {
        struct btrfs_work work;
        struct btrfs_fs_info *fs_info;
};
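
/*
 * How the structures above relate to each other (descriptive summary only,
 * derived from the code in this file):
 *
 * - fs_info->reada_tree is a radix tree of reada_extent, indexed by
 *   logical >> PAGE_SHIFT. Each extent is additionally linked into the
 *   per-device radix tree dev->reada_extents of every device that can
 *   serve it.
 * - A reada_zone covers one block group on one device. Zones live in the
 *   per-device radix tree dev->reada_zones, indexed by end >> PAGE_SHIFT,
 *   and are refcounted via kref; re->zones[] holds one zone per mirror
 *   that can serve the extent.
 * - A reada_extctl ties a reada_extent to the reada_control of one
 *   btrfs_reada_add() call, together with the expected generation.
 */
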
static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
static void reada_control_release(struct kref *kref);
static void reada_zone_release(struct kref *kref);
static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);

static int reada_add_block(struct reada_control *rc, u64 logical,
                           struct btrfs_key *top, u64 generation);

/* recurses */
/* in case of err, eb might be NULL */
static void __readahead_hook(struct btrfs_fs_info *fs_info,
                             struct reada_extent *re, struct extent_buffer *eb,
                             int err)
{
        int nritems;
        int i;
        u64 bytenr;
        u64 generation;
        struct list_head list;

        spin_lock(&re->lock);
        /*
         * Just take the full list from the extent; afterwards we
         * don't need the lock anymore.
         */
        list_replace_init(&re->extctl, &list);
        re->scheduled = 0;
        spin_unlock(&re->lock);

        /*
         * This is the error case: the extent buffer has not been
         * read correctly. We won't access anything from it and
         * just clean up our data structures. Effectively this will
         * cut the branch below this node from read ahead.
         */
        if (err)
                goto cleanup;

        /*
         * FIXME: currently we just bail out if this is a leaf, effectively
         * ignoring its content. As a next step we could trigger more
         * readahead depending on the content, e.g. fetch the checksums for
         * the extents in the leaf.
         */
        if (!btrfs_header_level(eb))
                goto cleanup;

        nritems = btrfs_header_nritems(eb);
        generation = btrfs_header_generation(eb);
        for (i = 0; i < nritems; i++) {
                struct reada_extctl *rec;
                u64 n_gen;
                struct btrfs_key key;
                struct btrfs_key next_key;

                btrfs_node_key_to_cpu(eb, &key, i);
                if (i + 1 < nritems)
                        btrfs_node_key_to_cpu(eb, &next_key, i + 1);
                else
                        next_key = re->top;
                bytenr = btrfs_node_blockptr(eb, i);
                n_gen = btrfs_node_ptr_generation(eb, i);

                list_for_each_entry(rec, &list, list) {
                        struct reada_control *rc = rec->rc;

                        /*
                         * if the generation doesn't match, just ignore this
                         * extctl. This will probably cut off a branch from
                         * prefetch. Alternatively one could start a new (sub-)
                         * prefetch for this branch, starting again from root.
                         * FIXME: move the generation check out of this loop
                         */
#ifdef DEBUG
                        if (rec->generation != generation) {
                                btrfs_debug(fs_info,
                                        "generation mismatch for (%llu,%d,%llu) %llu != %llu",
                                        key.objectid, key.type, key.offset,
                                        rec->generation, generation);
                        }
#endif
                        if (rec->generation == generation &&
                            btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
                            btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
                                reada_add_block(rc, bytenr, &next_key, n_gen);
                }
        }

cleanup:
        /*
         * free extctl records
         */
        while (!list_empty(&list)) {
                struct reada_control *rc;
                struct reada_extctl *rec;

                rec = list_first_entry(&list, struct reada_extctl, list);
                list_del(&rec->list);
                rc = rec->rc;
                kfree(rec);

                kref_get(&rc->refcnt);
                if (atomic_dec_and_test(&rc->elems)) {
                        kref_put(&rc->refcnt, reada_control_release);
                        wake_up(&rc->wait);
                }
                kref_put(&rc->refcnt, reada_control_release);

                reada_extent_put(fs_info, re); /* one ref for each entry */
        }

        return;
}

int btree_readahead_hook(struct extent_buffer *eb, int err)
{
        struct btrfs_fs_info *fs_info = eb->fs_info;
        int ret = 0;
        struct reada_extent *re;

        /* find extent */
        spin_lock(&fs_info->reada_lock);
        re = radix_tree_lookup(&fs_info->reada_tree,
                               eb->start >> PAGE_SHIFT);
        if (re)
                re->refcnt++;
        spin_unlock(&fs_info->reada_lock);
        if (!re) {
                ret = -1;
                goto start_machine;
        }

        __readahead_hook(fs_info, re, eb, err);
        reada_extent_put(fs_info, re);  /* our ref */

start_machine:
        reada_start_machine(fs_info);
        return ret;
}

static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
                                          struct btrfs_bio *bbio)
{
        struct btrfs_fs_info *fs_info = dev->fs_info;
        int ret;
        struct reada_zone *zone;
        struct btrfs_block_group_cache *cache = NULL;
        u64 start;
        u64 end;
        int i;

        zone = NULL;
        spin_lock(&fs_info->reada_lock);
        ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
                                     logical >> PAGE_SHIFT, 1);
        if (ret == 1 && logical >= zone->start && logical <= zone->end) {
                kref_get(&zone->refcnt);
                spin_unlock(&fs_info->reada_lock);
                return zone;
        }

        spin_unlock(&fs_info->reada_lock);

        cache = btrfs_lookup_block_group(fs_info, logical);
        if (!cache)
                return NULL;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;
        btrfs_put_block_group(cache);

        zone = kzalloc(sizeof(*zone), GFP_KERNEL);
        if (!zone)
                return NULL;

        ret = radix_tree_preload(GFP_KERNEL);
        if (ret) {
                kfree(zone);
                return NULL;
        }

        zone->start = start;
        zone->end = end;
        INIT_LIST_HEAD(&zone->list);
        spin_lock_init(&zone->lock);
        zone->locked = 0;
        kref_init(&zone->refcnt);
        zone->elems = 0;
        zone->device = dev; /* our device always sits at index 0 */
        for (i = 0; i < bbio->num_stripes; ++i) {
                /* bounds have already been checked */
                zone->devs[i] = bbio->stripes[i].dev;
        }
        zone->ndevs = bbio->num_stripes;

        spin_lock(&fs_info->reada_lock);
        ret = radix_tree_insert(&dev->reada_zones,
                                (unsigned long)(zone->end >> PAGE_SHIFT),
                                zone);

        if (ret == -EEXIST) {
                kfree(zone);
                ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
                                             logical >> PAGE_SHIFT, 1);
                if (ret == 1 && logical >= zone->start && logical <= zone->end)
                        kref_get(&zone->refcnt);
                else
                        zone = NULL;
        }
        spin_unlock(&fs_info->reada_lock);
        radix_tree_preload_end();

        return zone;
}

static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
                                              u64 logical,
                                              struct btrfs_key *top)
{
        int ret;
        struct reada_extent *re = NULL;
        struct reada_extent *re_exist = NULL;
        struct btrfs_bio *bbio = NULL;
        struct btrfs_device *dev;
        struct btrfs_device *prev_dev;
        u64 length;
        int real_stripes;
        int nzones = 0;
        unsigned long index = logical >> PAGE_SHIFT;
        int dev_replace_is_ongoing;
        int have_zone = 0;

        spin_lock(&fs_info->reada_lock);
        re = radix_tree_lookup(&fs_info->reada_tree, index);
        if (re)
                re->refcnt++;
        spin_unlock(&fs_info->reada_lock);

        if (re)
                return re;

        re = kzalloc(sizeof(*re), GFP_KERNEL);
        if (!re)
                return NULL;

        re->logical = logical;
        re->top = *top;
        INIT_LIST_HEAD(&re->extctl);
        spin_lock_init(&re->lock);
        re->refcnt = 1;

        /*
         * map block
         */
        length = fs_info->nodesize;
        ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
                              &length, &bbio, 0);
        if (ret || !bbio || length < fs_info->nodesize)
                goto error;

        if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
                btrfs_err(fs_info,
                          "readahead: more than %d copies not supported",
                          BTRFS_MAX_MIRRORS);
                goto error;
        }

        real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
        for (nzones = 0; nzones < real_stripes; ++nzones) {
                struct reada_zone *zone;

                dev = bbio->stripes[nzones].dev;

                /* cannot read ahead on missing device. */
                if (!dev->bdev)
                        continue;

                zone = reada_find_zone(dev, logical, bbio);
                if (!zone)
                        continue;

                re->zones[re->nzones++] = zone;
                spin_lock(&zone->lock);
                if (!zone->elems)
                        kref_get(&zone->refcnt);
                ++zone->elems;
                spin_unlock(&zone->lock);
                spin_lock(&fs_info->reada_lock);
                kref_put(&zone->refcnt, reada_zone_release);
                spin_unlock(&fs_info->reada_lock);
        }
        if (re->nzones == 0) {
                /* not a single zone found, error and out */
                goto error;
        }

        ret = radix_tree_preload(GFP_KERNEL);
        if (ret)
                goto error;

        /* insert extent in reada_tree + all per-device trees, all or nothing */
        btrfs_dev_replace_read_lock(&fs_info->dev_replace);
        spin_lock(&fs_info->reada_lock);
        ret = radix_tree_insert(&fs_info->reada_tree, index, re);
        if (ret == -EEXIST) {
                re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
                re_exist->refcnt++;
                spin_unlock(&fs_info->reada_lock);
                btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
                radix_tree_preload_end();
                goto error;
        }
        if (ret) {
                spin_unlock(&fs_info->reada_lock);
                btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
                radix_tree_preload_end();
                goto error;
        }
        radix_tree_preload_end();
        prev_dev = NULL;
        dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
                        &fs_info->dev_replace);
        for (nzones = 0; nzones < re->nzones; ++nzones) {
                dev = re->zones[nzones]->device;

                if (dev == prev_dev) {
                        /*
                         * in case of DUP, just add the first zone. As both
                         * are on the same device, there's nothing to gain
                         * from adding both.
                         * Also, it wouldn't work, as the tree is per device
                         * and adding would fail with EEXIST
                         */
                        continue;
                }
                if (!dev->bdev)
                        continue;

                if (dev_replace_is_ongoing &&
                    dev == fs_info->dev_replace.tgtdev) {
                        /*
                         * as this device is selected for reading only as
                         * a last resort, skip it for read ahead.
                         */
                        continue;
                }
                prev_dev = dev;
                ret = radix_tree_insert(&dev->reada_extents, index, re);
                if (ret) {
                        while (--nzones >= 0) {
                                dev = re->zones[nzones]->device;
                                BUG_ON(dev == NULL);
                                /* ignore whether the entry was inserted */
                                radix_tree_delete(&dev->reada_extents, index);
                        }
                        radix_tree_delete(&fs_info->reada_tree, index);
                        spin_unlock(&fs_info->reada_lock);
                        btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
                        goto error;
                }
                have_zone = 1;
        }
        spin_unlock(&fs_info->reada_lock);
        btrfs_dev_replace_read_unlock(&fs_info->dev_replace);

        if (!have_zone)
                goto error;

        btrfs_put_bbio(bbio);
        return re;

error:
        for (nzones = 0; nzones < re->nzones; ++nzones) {
                struct reada_zone *zone;

                zone = re->zones[nzones];
                kref_get(&zone->refcnt);
                spin_lock(&zone->lock);
                --zone->elems;
                if (zone->elems == 0) {
                        /*
                         * no fs_info->reada_lock needed, as this can't be
                         * the last ref
                         */
                        kref_put(&zone->refcnt, reada_zone_release);
                }
                spin_unlock(&zone->lock);

                spin_lock(&fs_info->reada_lock);
                kref_put(&zone->refcnt, reada_zone_release);
                spin_unlock(&fs_info->reada_lock);
        }
        btrfs_put_bbio(bbio);
        kfree(re);
        return re_exist;
}

static void reada_extent_put(struct btrfs_fs_info *fs_info,
                             struct reada_extent *re)
{
        int i;
        unsigned long index = re->logical >> PAGE_SHIFT;

        spin_lock(&fs_info->reada_lock);
        if (--re->refcnt) {
                spin_unlock(&fs_info->reada_lock);
                return;
        }

        radix_tree_delete(&fs_info->reada_tree, index);
        for (i = 0; i < re->nzones; ++i) {
                struct reada_zone *zone = re->zones[i];

                radix_tree_delete(&zone->device->reada_extents, index);
        }

        spin_unlock(&fs_info->reada_lock);

        for (i = 0; i < re->nzones; ++i) {
                struct reada_zone *zone = re->zones[i];

                kref_get(&zone->refcnt);
                spin_lock(&zone->lock);
                --zone->elems;
                if (zone->elems == 0) {
                        /* no fs_info->reada_lock needed, as this can't be
                         * the last ref */
                        kref_put(&zone->refcnt, reada_zone_release);
                }
                spin_unlock(&zone->lock);

                spin_lock(&fs_info->reada_lock);
                kref_put(&zone->refcnt, reada_zone_release);
                spin_unlock(&fs_info->reada_lock);
        }

        kfree(re);
}

static void reada_zone_release(struct kref *kref)
{
        struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);

        radix_tree_delete(&zone->device->reada_zones,
                          zone->end >> PAGE_SHIFT);

        kfree(zone);
}

static void reada_control_release(struct kref *kref)
{
        struct reada_control *rc = container_of(kref, struct reada_control,
                                                refcnt);

        kfree(rc);
}

static int reada_add_block(struct reada_control *rc, u64 logical,
                           struct btrfs_key *top, u64 generation)
{
        struct btrfs_fs_info *fs_info = rc->fs_info;
        struct reada_extent *re;
        struct reada_extctl *rec;

        /* takes one ref */
        re = reada_find_extent(fs_info, logical, top);
        if (!re)
                return -1;

        rec = kzalloc(sizeof(*rec), GFP_KERNEL);
        if (!rec) {
                reada_extent_put(fs_info, re);
                return -ENOMEM;
        }

        rec->rc = rc;
        rec->generation = generation;
        atomic_inc(&rc->elems);

        spin_lock(&re->lock);
        list_add_tail(&rec->list, &re->extctl);
        spin_unlock(&re->lock);

        /* leave the ref on the extent */

        return 0;
}

/*
 * called with fs_info->reada_lock held
 */
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
        int i;
        unsigned long index = zone->end >> PAGE_SHIFT;

        for (i = 0; i < zone->ndevs; ++i) {
                struct reada_zone *peer;
                peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
                if (peer && peer->device != zone->device)
                        peer->locked = lock;
        }
}

/*
 * called with fs_info->reada_lock held
 */
static int reada_pick_zone(struct btrfs_device *dev)
{
        struct reada_zone *top_zone = NULL;
        struct reada_zone *top_locked_zone = NULL;
        u64 top_elems = 0;
        u64 top_locked_elems = 0;
        unsigned long index = 0;
        int ret;

        if (dev->reada_curr_zone) {
                reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
                kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
                dev->reada_curr_zone = NULL;
        }
        /* pick the zone with the most elements */
        while (1) {
                struct reada_zone *zone;

                ret = radix_tree_gang_lookup(&dev->reada_zones,
                                             (void **)&zone, index, 1);
                if (ret == 0)
                        break;
                index = (zone->end >> PAGE_SHIFT) + 1;
                if (zone->locked) {
                        if (zone->elems > top_locked_elems) {
                                top_locked_elems = zone->elems;
                                top_locked_zone = zone;
                        }
                } else {
                        if (zone->elems > top_elems) {
                                top_elems = zone->elems;
                                top_zone = zone;
                        }
                }
        }
        if (top_zone)
                dev->reada_curr_zone = top_zone;
        else if (top_locked_zone)
                dev->reada_curr_zone = top_locked_zone;
        else
                return 0;

        dev->reada_next = dev->reada_curr_zone->start;
        kref_get(&dev->reada_curr_zone->refcnt);
        reada_peer_zones_set_lock(dev->reada_curr_zone, 1);

        return 1;
}

static int reada_start_machine_dev(struct btrfs_device *dev)
{
        struct btrfs_fs_info *fs_info = dev->fs_info;
        struct reada_extent *re = NULL;
        int mirror_num = 0;
        struct extent_buffer *eb = NULL;
        u64 logical;
        int ret;
        int i;

        spin_lock(&fs_info->reada_lock);
        if (dev->reada_curr_zone == NULL) {
                ret = reada_pick_zone(dev);
                if (!ret) {
                        spin_unlock(&fs_info->reada_lock);
                        return 0;
                }
        }
        /*
         * FIXME: currently we issue the reads one extent at a time. If we
         * have a contiguous block of extents, we could also coalesce them
         * or use plugging to speed things up.
         */
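        /*
         * A rough sketch of how such batching might look (an assumption for
         * illustration only, not what the code below does): collect a batch
         * of extents from the radix tree first, then submit them under one
         * block plug so the block layer can merge adjacent requests:
         *
         *      struct blk_plug plug;
         *
         *      blk_start_plug(&plug);
         *      for each collected extent
         *              reada_tree_block_flagged(fs_info, logical, mirror_num, &eb);
         *      blk_finish_plug(&plug);
         */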
        ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
                                     dev->reada_next >> PAGE_SHIFT, 1);
        if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
                ret = reada_pick_zone(dev);
                if (!ret) {
                        spin_unlock(&fs_info->reada_lock);
                        return 0;
                }
                re = NULL;
                ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
                                             dev->reada_next >> PAGE_SHIFT, 1);
        }
        if (ret == 0) {
                spin_unlock(&fs_info->reada_lock);
                return 0;
        }
        dev->reada_next = re->logical + fs_info->nodesize;
        re->refcnt++;

        spin_unlock(&fs_info->reada_lock);

        spin_lock(&re->lock);
        if (re->scheduled || list_empty(&re->extctl)) {
                spin_unlock(&re->lock);
                reada_extent_put(fs_info, re);
                return 0;
        }
        re->scheduled = 1;
        spin_unlock(&re->lock);

        /*
         * find mirror num
         */
        for (i = 0; i < re->nzones; ++i) {
                if (re->zones[i]->device == dev) {
                        mirror_num = i + 1;
                        break;
                }
        }
        logical = re->logical;

        atomic_inc(&dev->reada_in_flight);
        ret = reada_tree_block_flagged(fs_info, logical, mirror_num, &eb);
        if (ret)
                __readahead_hook(fs_info, re, NULL, ret);
        else if (eb)
                __readahead_hook(fs_info, re, eb, ret);

        if (eb)
                free_extent_buffer(eb);

        atomic_dec(&dev->reada_in_flight);
        reada_extent_put(fs_info, re);

        return 1;

}

static void reada_start_machine_worker(struct btrfs_work *work)
{
        struct reada_machine_work *rmw;
        struct btrfs_fs_info *fs_info;
        int old_ioprio;

        rmw = container_of(work, struct reada_machine_work, work);
        fs_info = rmw->fs_info;

        kfree(rmw);

        old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
                                       task_nice_ioprio(current));
        set_task_ioprio(current, BTRFS_IOPRIO_READA);
        __reada_start_machine(fs_info);
        set_task_ioprio(current, old_ioprio);

        atomic_dec(&fs_info->reada_works_cnt);
}

static void __reada_start_machine(struct btrfs_fs_info *fs_info)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        u64 enqueued;
        u64 total = 0;
        int i;

        do {
                enqueued = 0;
                mutex_lock(&fs_devices->device_list_mutex);
                list_for_each_entry(device, &fs_devices->devices, dev_list) {
                        if (atomic_read(&device->reada_in_flight) <
                            MAX_IN_FLIGHT)
                                enqueued += reada_start_machine_dev(device);
                }
                mutex_unlock(&fs_devices->device_list_mutex);
                total += enqueued;
        } while (enqueued && total < 10000);

        if (enqueued == 0)
                return;

        /*
         * If everything is already in the cache, this is effectively single
         * threaded. To a) not hold the caller for too long and b) to utilize
         * more cores, we broke the loop above after 10000 iterations and now
         * enqueue to workers to finish it. This will distribute the load to
         * the cores.
         */
        for (i = 0; i < 2; ++i) {
                reada_start_machine(fs_info);
                if (atomic_read(&fs_info->reada_works_cnt) >
                    BTRFS_MAX_MIRRORS * 2)
                        break;
        }
}

static void reada_start_machine(struct btrfs_fs_info *fs_info)
{
        struct reada_machine_work *rmw;

        rmw = kzalloc(sizeof(*rmw), GFP_KERNEL);
        if (!rmw) {
                /* FIXME we cannot handle this properly right now */
                BUG();
        }
        btrfs_init_work(&rmw->work, btrfs_readahead_helper,
                        reada_start_machine_worker, NULL, NULL);
        rmw->fs_info = fs_info;

        btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
        atomic_inc(&fs_info->reada_works_cnt);
}

#ifdef DEBUG
static void dump_devs(struct btrfs_fs_info *fs_info, int all)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        unsigned long index;
        int ret;
        int i;
        int j;
        int cnt;

        spin_lock(&fs_info->reada_lock);
        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                btrfs_debug(fs_info, "dev %lld has %d in flight", device->devid,
                        atomic_read(&device->reada_in_flight));
                index = 0;
                while (1) {
                        struct reada_zone *zone;
                        ret = radix_tree_gang_lookup(&device->reada_zones,
                                                     (void **)&zone, index, 1);
                        if (ret == 0)
                                break;
                        pr_debug(" zone %llu-%llu elems %llu locked %d devs",
                                zone->start, zone->end, zone->elems,
                                zone->locked);
                        for (j = 0; j < zone->ndevs; ++j) {
                                pr_cont(" %lld",
                                        zone->devs[j]->devid);
                        }
                        if (device->reada_curr_zone == zone)
                                pr_cont(" curr off %llu",
                                        device->reada_next - zone->start);
                        pr_cont("\n");
                        index = (zone->end >> PAGE_SHIFT) + 1;
                }
                cnt = 0;
                index = 0;
                while (all) {
                        struct reada_extent *re = NULL;

                        ret = radix_tree_gang_lookup(&device->reada_extents,
                                                     (void **)&re, index, 1);
                        if (ret == 0)
                                break;
                        pr_debug(" re: logical %llu size %u empty %d scheduled %d",
                                re->logical, fs_info->nodesize,
                                list_empty(&re->extctl), re->scheduled);

                        for (i = 0; i < re->nzones; ++i) {
                                pr_cont(" zone %llu-%llu devs",
                                        re->zones[i]->start,
                                        re->zones[i]->end);
                                for (j = 0; j < re->zones[i]->ndevs; ++j) {
                                        pr_cont(" %lld",
                                                re->zones[i]->devs[j]->devid);
                                }
                        }
                        pr_cont("\n");
                        index = (re->logical >> PAGE_SHIFT) + 1;
                        if (++cnt > 15)
                                break;
                }
        }

        index = 0;
        cnt = 0;
        while (all) {
                struct reada_extent *re = NULL;

                ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
                                             index, 1);
                if (ret == 0)
                        break;
                if (!re->scheduled) {
                        index = (re->logical >> PAGE_SHIFT) + 1;
                        continue;
                }
                pr_debug("re: logical %llu size %u list empty %d scheduled %d",
                        re->logical, fs_info->nodesize,
                        list_empty(&re->extctl), re->scheduled);
                for (i = 0; i < re->nzones; ++i) {
                        pr_cont(" zone %llu-%llu devs",
                                re->zones[i]->start,
                                re->zones[i]->end);
                        for (j = 0; j < re->zones[i]->ndevs; ++j) {
                                pr_cont(" %lld",
                                        re->zones[i]->devs[j]->devid);
                        }
                }
                pr_cont("\n");
                index = (re->logical >> PAGE_SHIFT) + 1;
        }
        spin_unlock(&fs_info->reada_lock);
}
#endif

/*
 * interface
 */
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
                        struct btrfs_key *key_start, struct btrfs_key *key_end)
{
        struct reada_control *rc;
        u64 start;
        u64 generation;
        int ret;
        struct extent_buffer *node;
        static struct btrfs_key max_key = {
                .objectid = (u64)-1,
                .type = (u8)-1,
                .offset = (u64)-1
        };

        rc = kzalloc(sizeof(*rc), GFP_KERNEL);
        if (!rc)
                return ERR_PTR(-ENOMEM);

        rc->fs_info = root->fs_info;
        rc->key_start = *key_start;
        rc->key_end = *key_end;
        atomic_set(&rc->elems, 0);
        init_waitqueue_head(&rc->wait);
        kref_init(&rc->refcnt);
        kref_get(&rc->refcnt); /* one ref for having elements */

        node = btrfs_root_node(root);
        start = node->start;
        generation = btrfs_header_generation(node);
        free_extent_buffer(node);

        ret = reada_add_block(rc, start, &max_key, generation);
        if (ret) {
                kfree(rc);
                return ERR_PTR(ret);
        }

        reada_start_machine(root->fs_info);

        return rc;
}

#ifdef DEBUG
int btrfs_reada_wait(void *handle)
{
        struct reada_control *rc = handle;
        struct btrfs_fs_info *fs_info = rc->fs_info;

        while (atomic_read(&rc->elems)) {
                if (!atomic_read(&fs_info->reada_works_cnt))
                        reada_start_machine(fs_info);
                wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
                                   5 * HZ);
                dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
        }

        dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);

        kref_put(&rc->refcnt, reada_control_release);

        return 0;
}
#else
int btrfs_reada_wait(void *handle)
{
        struct reada_control *rc = handle;
        struct btrfs_fs_info *fs_info = rc->fs_info;

        while (atomic_read(&rc->elems)) {
                if (!atomic_read(&fs_info->reada_works_cnt))
                        reada_start_machine(fs_info);
                wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
                                   (HZ + 9) / 10);
        }

        kref_put(&rc->refcnt, reada_control_release);

        return 0;
}
#endif

void btrfs_reada_detach(void *handle)
{
        struct reada_control *rc = handle;

        kref_put(&rc->refcnt, reada_control_release);
}