// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/rculist.h>

#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 *   Check for bad keys in replay
 *   Propagate barriers
 *   Refcount journal entries in journal_replay
 *
 * Garbage collection:
 *   Finish incremental gc
 *   Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72
#define MAX_GC_TIMES		100
#define MIN_GC_NODES		100
#define GC_SLEEP_MS		100

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

#define PTR_HASH(c, k)							\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

#define insert_lock(s, b)	((b)->level <= (s)->lock)

/*
 * These macros are for recursing down the btree - they handle the details of
 * locking and looking up nodes in the cache for you. They're best treated as
 * mere syntax when reading code that uses them.
 *
 * op->lock determines whether we take a read or a write lock at a given depth.
 * If you've got a read lock and find that you need a write lock (i.e. you're
 * going to have to split), set op->lock and return -EINTR; btree_root() will
 * call you again and you'll have the correct lock.
 */

/**
 * btree - recurse down the btree on a specified key
 * @fn:		function to call, which will be passed the child node
 * @key:	key to recurse on
 * @b:		parent btree node
 * @op:		pointer to struct btree_op
 */
#define btree(fn, key, b, op, ...)					\
({									\
	int _r, l = (b)->level - 1;					\
	bool _w = l <= (op)->lock;					\
	struct btree *_child = bch_btree_node_get((b)->c, op, key, l,	\
						  _w, b);		\
	if (!IS_ERR(_child)) {						\
		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
		rw_unlock(_w, _child);					\
	} else								\
		_r = PTR_ERR(_child);					\
	_r;								\
})

/**
 * btree_root - call a function on the root of the btree
 * @fn:		function to call, which will be passed the child node
 * @c:		cache set
 * @op:		pointer to struct btree_op
 */
#define btree_root(fn, c, op, ...)					\
({									\
	int _r = -EINTR;						\
	do {								\
		struct btree *_b = (c)->root;				\
		bool _w = insert_lock(op, _b);				\
		rw_lock(_w, _b, _b->level);				\
		if (_b == (c)->root &&					\
		    _w == insert_lock(op, _b)) {			\
			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
		}							\
		rw_unlock(_w, _b);					\
		bch_cannibalize_unlock(c);				\
		if (_r == -EINTR)					\
			schedule();					\
	} while (_r == -EINTR);						\
									\
	finish_wait(&(c)->btree_cache_wait, &(op)->wait);		\
	_r;								\
})

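/*
 * Return the bset that new keys are appended to: the first block of the
 * node's buffer that has not yet been written to disk.
 */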
static inline struct bset *write_block(struct btree *b)
{
	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
}

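/*
 * Finish off the in-memory sets (sorting fully for interior nodes, lazily
 * for leaves) and, if the node still has unwritten blocks, start a fresh
 * bset for subsequent insertions.
 */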
static void bch_btree_init_next(struct btree *b)
{
	/* If not a leaf node, always sort */
	if (b->level && b->keys.nsets)
		bch_btree_sort(&b->keys, &b->c->sort);
	else
		bch_btree_sort_lazy(&b->keys, &b->c->sort);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->sb));

}

/* Btree key manipulation */

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}

/* Btree IO */

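/*
 * Checksum a bset: the crc is seeded with the node's first pointer and
 * covers everything after the bset's 8 byte csum field itself.
 */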
static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = bset_bkey_last(i);

	crc = bch_crc64_update(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}

void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = btree_bset_first(b);
	struct btree_iter *iter;

	/*
	 * c->fill_iter can allocate an iterator with more memory space
	 * than static MAX_BSETS.
	 * See the comment around cache_set->fill_iter.
	 */
	iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = &b->keys;
#endif

	if (!i->seq)
		goto err;

	for (;
	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, block_bytes(b->c)) >
		    btree_blocks(b))
			goto err;

		err = "bad magic";
		if (i->magic != bset_magic(&b->c->sb))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->keys.set[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));

		b->written += set_blocks(i, block_bytes(b->c));
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
	     i = ((void *) i) + block_bytes(b->c))
		if (i->seq == b->keys.set[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);

	i = b->keys.set[0].data;
	err = "short btree key";
	if (b->keys.set[0].size &&
	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->sb));
out:
	mempool_free(iter, &b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    bset_block_offset(b, i), i->keys);
	goto out;
}

static void btree_node_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}

static void bch_btree_node_read(struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;

	trace_bcache_btree_read(b);

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(b->c);
	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
	bio->bi_end_io	= btree_node_read_endio;
	bio->bi_private	= &cl;
	bio->bi_opf = REQ_OP_READ | REQ_META;

	bch_bio_map(bio, b->keys.set[0].data);

	bch_submit_bbio(bio, b->c, &b->key, 0);
	closure_sync(&cl);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_free(bio, b->c);

	if (btree_node_io_error(b))
		goto err;

	bch_btree_node_read_done(b);
	bch_time_stats_update(&b->c->btree_read_time, start_time);

	return;
err:
	bch_cache_set_error(b->c, "io error reading bucket %zu",
			    PTR_BUCKET_NR(b->c, &b->key, 0));
}

static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
		wake_up_allocators(b->c);

	if (w->journal) {
		atomic_dec_bug(w->journal);
		__closure_wake_up(&b->c->journal.wait);
	}

	w->prio_blocked	= 0;
	w->journal	= NULL;
}

static void btree_node_write_unlock(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	up(&b->io_mutex);
}

static void __btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);
	struct btree_write *w = btree_prev_write(b);

	bch_bbio_free(b->bio, b->c);
	b->bio = NULL;
	btree_complete_write(b, w);

	if (btree_node_dirty(b))
		schedule_delayed_work(&b->work, 30 * HZ);

	closure_return_with_destructor(cl, btree_node_write_unlock);
}

static void btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	bio_free_pages(b->bio);
	__btree_node_write_done(cl);
}

static void btree_node_write_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct btree *b = container_of(cl, struct btree, io);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
	closure_put(cl);
}

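/*
 * Build and submit the bio for the current bset. When page allocation
 * succeeds the bset is copied into freshly allocated pages; otherwise the
 * bio maps the bset memory directly and the write is waited on
 * synchronously.
 */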
static void do_btree_node_write(struct btree *b)
{
	struct closure *cl = &b->io;
	struct bset *i = btree_bset_last(b);
	BKEY_PADDED(key) k;

	i->version	= BCACHE_BSET_VERSION;
	i->csum		= btree_csum_set(b, i);

	BUG_ON(b->bio);
	b->bio = bch_bbio_alloc(b->c);

	b->bio->bi_end_io	= btree_node_write_endio;
	b->bio->bi_private	= cl;
	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c));
	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
	bch_bio_map(b->bio, i);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bkey_copy(&k.key, &b->key);
	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
		       bset_sector_offset(&b->keys, i));

	if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
		int j;
		struct bio_vec *bv;
		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));

		bio_for_each_segment_all(bv, b->bio, j)
			memcpy(page_address(bv->bv_page),
			       base + j * PAGE_SIZE, PAGE_SIZE);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		continue_at(cl, btree_node_write_done, NULL);
	} else {
		/*
		 * No problem for multipage bvec since the bio is
		 * just allocated
		 */
		b->bio->bi_vcnt = 0;
		bch_bio_map(b->bio, i);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		closure_sync(cl);
		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
	}
}

void __bch_btree_node_write(struct btree *b, struct closure *parent)
{
	struct bset *i = btree_bset_last(b);

	lockdep_assert_held(&b->write_lock);

	trace_bcache_btree_write(b);

	BUG_ON(current->bio_list);
	BUG_ON(b->written >= btree_blocks(b));
	BUG_ON(b->written && !i->keys);
	BUG_ON(btree_bset_first(b)->seq != i->seq);
	bch_check_keys(&b->keys, "writing");

	cancel_delayed_work(&b->work);

	/* If caller isn't waiting for write, parent refcount is cache set */
	down(&b->io_mutex);
	closure_init(&b->io, parent ?: &b->c->cl);

	clear_bit(BTREE_NODE_dirty,	 &b->flags);
	change_bit(BTREE_NODE_write_idx, &b->flags);

	do_btree_node_write(b);

	atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);

	b->written += set_blocks(i, block_bytes(b->c));
}

void bch_btree_node_write(struct btree *b, struct closure *parent)
{
	unsigned int nsets = b->keys.nsets;

	lockdep_assert_held(&b->lock);

	__bch_btree_node_write(b, parent);

	/*
	 * do verify if there was more than one set initially (i.e. we did a
	 * sort) and we sorted down to a single set:
	 */
	if (nsets && !b->keys.nsets)
		bch_btree_verify(b);

	bch_btree_init_next(b);
}

static void bch_btree_node_write_sync(struct btree *b)
{
	struct closure cl;

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);
	bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);
}

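/* Delayed work: write out a node that has stayed dirty for ~30 seconds. */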
static void btree_node_write_work(struct work_struct *w)
{
	struct btree *b = container_of(to_delayed_work(w), struct btree, work);

	mutex_lock(&b->write_lock);
	if (btree_node_dirty(b))
		__bch_btree_node_write(b, NULL);
	mutex_unlock(&b->write_lock);
}

static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
	struct bset *i = btree_bset_last(b);
	struct btree_write *w = btree_current_write(b);

	lockdep_assert_held(&b->write_lock);

	BUG_ON(!b->written);
	BUG_ON(!i->keys);

	if (!btree_node_dirty(b))
		schedule_delayed_work(&b->work, 30 * HZ);

	set_btree_node_dirty(b);

	if (journal_ref) {
		if (w->journal &&
		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
			atomic_dec_bug(w->journal);
			w->journal = NULL;
		}

		if (!w->journal) {
			w->journal = journal_ref;
			atomic_inc(w->journal);
		}
	}

	/* Force write if set is too big */
	if (set_bytes(i) > PAGE_SIZE - 48 &&
	    !current->bio_list)
		bch_btree_node_write(b, NULL);
}

/*
 * Btree in memory cache - allocation/freeing
 * mca -> memory cache
 */

#define mca_reserve(c)	(((c->root && c->root->level)		\
			  ? c->root->level : 1) * 8 + 16)
#define mca_can_free(c)						\
	max_t(int, 0, c->btree_cache_used - mca_reserve(c))

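/* Free a node's data buffer but keep struct btree itself around for reuse. */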
static void mca_data_free(struct btree *b)
{
	BUG_ON(b->io_mutex.count != 1);

	bch_btree_keys_free(&b->keys);

	b->c->btree_cache_used--;
	list_move(&b->list, &b->c->btree_cache_freed);
}

static void mca_bucket_free(struct btree *b)
{
	BUG_ON(btree_node_dirty(b));

	b->key.ptr[0] = 0;
	hlist_del_init_rcu(&b->hash);
	list_move(&b->list, &b->c->btree_cache_freeable);
}

static unsigned int btree_order(struct bkey *k)
{
	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}

static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
	if (!bch_btree_keys_alloc(&b->keys,
				  max_t(unsigned int,
					ilog2(b->c->btree_pages),
					btree_order(k)),
				  gfp)) {
		b->c->btree_cache_used++;
		list_move(&b->list, &b->c->btree_cache);
	} else {
		list_move(&b->list, &b->c->btree_cache_freed);
	}
}

static struct btree *mca_bucket_alloc(struct cache_set *c,
				      struct bkey *k, gfp_t gfp)
{
	struct btree *b = kzalloc(sizeof(struct btree), gfp);

	if (!b)
		return NULL;

	init_rwsem(&b->lock);
	lockdep_set_novalidate_class(&b->lock);
	mutex_init(&b->write_lock);
	lockdep_set_novalidate_class(&b->write_lock);
	INIT_LIST_HEAD(&b->list);
	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
	b->c = c;
	sema_init(&b->io_mutex, 1);

	mca_data_alloc(b, k, gfp);
	return b;
}

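/*
 * Try to claim a cached node for reuse: trylock it, require its buffer to
 * be at least min_order pages, and (if @flush) write out any dirty
 * contents first. Returns 0 with the write lock held, or -ENOMEM if the
 * node can't be taken right now.
 */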
static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
{
	struct closure cl;

	closure_init_stack(&cl);
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);

	if (b->keys.page_order < min_order)
		goto out_unlock;

	if (!flush) {
		if (btree_node_dirty(b))
			goto out_unlock;

		if (down_trylock(&b->io_mutex))
			goto out_unlock;
		up(&b->io_mutex);
	}

	mutex_lock(&b->write_lock);
	if (btree_node_dirty(b))
		__bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);

	/* wait for any in flight btree write */
	down(&b->io_mutex);
	up(&b->io_mutex);

	return 0;
out_unlock:
	rw_unlock(true, b);
	return -ENOMEM;
}

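/*
 * Shrinker scan callback: free data buffers from the freeable list first,
 * then reclaim clean, unused nodes from the cache proper, always leaving
 * the reserve needed for forward progress.
 */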
static unsigned long bch_mca_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
	struct btree *b, *t;
	unsigned long i, nr = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned int btree_cache_used;

	if (c->shrinker_disabled)
		return SHRINK_STOP;

	if (c->btree_cache_alloc_lock)
		return SHRINK_STOP;

	/* Return -1 if we can't do anything right now */
	if (sc->gfp_mask & __GFP_IO)
		mutex_lock(&c->bucket_lock);
	else if (!mutex_trylock(&c->bucket_lock))
		return -1;

	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve. The reserve is how we
	 * guarantee that allocating memory for a new btree node can always
	 * succeed, so that inserting keys into the btree can always succeed and
	 * IO can always make forward progress:
	 */
	nr /= c->btree_pages;
	nr = min_t(unsigned long, nr, mca_can_free(c));

	i = 0;
	btree_cache_used = c->btree_cache_used;
	list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
		if (nr <= 0)
			goto out;

		if (++i > 3 &&
		    !mca_reap(b, 0, false)) {
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}
		nr--;
	}

	for (;  (nr--) && i < btree_cache_used; i++) {
		if (list_empty(&c->btree_cache))
			goto out;

		b = list_first_entry(&c->btree_cache, struct btree, list);
		list_rotate_left(&c->btree_cache);

		if (!b->accessed &&
		    !mca_reap(b, 0, false)) {
			mca_bucket_free(b);
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		} else
			b->accessed = 0;
	}
out:
	mutex_unlock(&c->bucket_lock);
	return freed * c->btree_pages;
}

static unsigned long bch_mca_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);

	if (c->shrinker_disabled)
		return 0;

	if (c->btree_cache_alloc_lock)
		return 0;

	return mca_can_free(c) * c->btree_pages;
}

void bch_btree_cache_free(struct cache_set *c)
{
	struct btree *b;
	struct closure cl;

	closure_init_stack(&cl);

	if (c->shrink.list.next)
		unregister_shrinker(&c->shrink);

	mutex_lock(&c->bucket_lock);

#ifdef CONFIG_BCACHE_DEBUG
	if (c->verify_data)
		list_move(&c->verify_data->list, &c->btree_cache);

	free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
#endif

	list_splice(&c->btree_cache_freeable,
		    &c->btree_cache);

	while (!list_empty(&c->btree_cache)) {
		b = list_first_entry(&c->btree_cache, struct btree, list);

		if (btree_node_dirty(b))
			btree_complete_write(b, btree_current_write(b));
		clear_bit(BTREE_NODE_dirty, &b->flags);

		mca_data_free(b);
	}

	while (!list_empty(&c->btree_cache_freed)) {
		b = list_first_entry(&c->btree_cache_freed,
				     struct btree, list);
		list_del(&b->list);
		cancel_delayed_work_sync(&b->work);
		kfree(b);
	}

	mutex_unlock(&c->bucket_lock);
}

int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned int i;

	for (i = 0; i < mca_reserve(c); i++)
		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
			return -ENOMEM;

	list_splice_init(&c->btree_cache,
			 &c->btree_cache_freeable);

#ifdef CONFIG_BCACHE_DEBUG
	mutex_init(&c->verify_lock);

	c->verify_ondisk = (void *)
		__get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));

	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

	if (c->verify_data &&
	    c->verify_data->keys.set->data)
		list_del_init(&c->verify_data->list);
	else
		c->verify_data = NULL;
#endif

	c->shrink.count_objects = bch_mca_count;
	c->shrink.scan_objects = bch_mca_scan;
	c->shrink.seeks = 4;
	c->shrink.batch = c->btree_pages * 2;

	if (register_shrinker(&c->shrink))
		pr_warn("bcache: %s: could not register shrinker",
			__func__);

	return 0;
}

/* Btree in memory cache - hash table */

static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
{
	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
}

static struct btree *mca_find(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	rcu_read_lock();
	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
			goto out;
	b = NULL;
out:
	rcu_read_unlock();
	return b;
}

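/*
 * Take the open coded cannibalize lock: if another thread already holds
 * it, queue ourselves on btree_cache_wait and return -EINTR so the caller
 * unwinds and retries from the root.
 */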
static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
{
	struct task_struct *old;

	old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
	if (old && old != current) {
		if (op)
			prepare_to_wait(&c->btree_cache_wait, &op->wait,
					TASK_UNINTERRUPTIBLE);
		return -EINTR;
	}

	return 0;
}

static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
				     struct bkey *k)
{
	struct btree *b;

	trace_bcache_btree_cache_cannibalize(c);

	if (mca_cannibalize_lock(c, op))
		return ERR_PTR(-EINTR);

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), false))
			return b;

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), true))
			return b;

	WARN(1, "btree cache cannibalize failed\n");
	return ERR_PTR(-ENOMEM);
}

/*
 * We can only have one thread cannibalizing other cached btree nodes at a time,
 * or we'll deadlock. We use an open coded mutex to ensure that, which
 * mca_cannibalize() will take. This means every time we unlock the root of
 * the btree, we need to release this lock if we have it held.
 */
static void bch_cannibalize_unlock(struct cache_set *c)
{
	if (c->btree_cache_alloc_lock == current) {
		c->btree_cache_alloc_lock = NULL;
		wake_up(&c->btree_cache_wait);
	}
}

static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
			       struct bkey *k, int level)
{
	struct btree *b;

	BUG_ON(current->bio_list);

	lockdep_assert_held(&c->bucket_lock);

	if (mca_find(c, k))
		return NULL;

	/* btree_free() doesn't free memory; it sticks the node on the end of
	 * the list. Check if there's any freed nodes there:
	 */
	list_for_each_entry(b, &c->btree_cache_freeable, list)
		if (!mca_reap(b, btree_order(k), false))
			goto out;

	/* We never free struct btree itself, just the memory that holds the on
	 * disk node. Check the freed list before allocating a new one:
	 */
	list_for_each_entry(b, &c->btree_cache_freed, list)
		if (!mca_reap(b, 0, false)) {
			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
			if (!b->keys.set[0].data)
				goto err;
			else
				goto out;
		}

	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
	if (!b)
		goto err;

	BUG_ON(!down_write_trylock(&b->lock));
	if (!b->keys.set->data)
		goto err;
out:
	BUG_ON(b->io_mutex.count != 1);

	bkey_copy(&b->key, k);
	list_move(&b->list, &c->btree_cache);
	hlist_del_init_rcu(&b->hash);
	hlist_add_head_rcu(&b->hash, mca_hash(c, k));

	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
	b->parent	= (void *) ~0UL;
	b->flags	= 0;
	b->written	= 0;
	b->level	= level;

	if (!b->level)
		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
				    &b->c->expensive_debug_checks);
	else
		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
				    &b->c->expensive_debug_checks);

	return b;
err:
	if (b)
		rw_unlock(true, b);

	b = mca_cannibalize(c, op, k);
	if (!IS_ERR(b))
		goto out;

	return b;
}

/*
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 */
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
				 struct bkey *k, int level, bool write,
				 struct btree *parent)
{
	int i = 0;
	struct btree *b;

	BUG_ON(level < 0);
retry:
	b = mca_find(c, k);

	if (!b) {
		if (current->bio_list)
			return ERR_PTR(-EAGAIN);

		mutex_lock(&c->bucket_lock);
		b = mca_alloc(c, op, k, level);
		mutex_unlock(&c->bucket_lock);

		if (!b)
			goto retry;
		if (IS_ERR(b))
			return b;

		bch_btree_node_read(b);

		if (!write)
			downgrade_write(&b->lock);
	} else {
		rw_lock(write, b, level);
		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
			rw_unlock(write, b);
			goto retry;
		}
		BUG_ON(b->level != level);
	}

	if (btree_node_io_error(b)) {
		rw_unlock(write, b);
		return ERR_PTR(-EIO);
	}

	BUG_ON(!b->written);

	b->parent = parent;
	b->accessed = 1;

	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
		prefetch(b->keys.set[i].tree);
		prefetch(b->keys.set[i].data);
	}

	for (; i <= b->keys.nsets; i++)
		prefetch(b->keys.set[i].data);

	return b;
}

static void btree_node_prefetch(struct btree *parent, struct bkey *k)
{
	struct btree *b;

	mutex_lock(&parent->c->bucket_lock);
	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
	mutex_unlock(&parent->c->bucket_lock);

	if (!IS_ERR_OR_NULL(b)) {
		b->parent = parent;
		bch_btree_node_read(b);
		rw_unlock(true, b);
	}
}

/* Btree alloc */

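/* Drop a node from the in-memory cache and return its bucket to the allocator. */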
static void btree_node_free(struct btree *b)
{
	trace_bcache_btree_node_free(b);

	BUG_ON(b == b->c->root);

	mutex_lock(&b->write_lock);

	if (btree_node_dirty(b))
		btree_complete_write(b, btree_current_write(b));
	clear_bit(BTREE_NODE_dirty, &b->flags);

	mutex_unlock(&b->write_lock);

	cancel_delayed_work(&b->work);

	mutex_lock(&b->c->bucket_lock);
	bch_bucket_free(b->c, &b->key);
	mca_bucket_free(b);
	mutex_unlock(&b->c->bucket_lock);
}

struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
				     int level, bool wait,
				     struct btree *parent)
{
	BKEY_PADDED(key) k;
	struct btree *b = ERR_PTR(-EAGAIN);

	mutex_lock(&c->bucket_lock);
retry:
	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
		goto err;

	bkey_put(c, &k.key);
	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);

	b = mca_alloc(c, op, &k.key, level);
	if (IS_ERR(b))
		goto err_free;

	if (!b) {
		cache_bug(c,
			"Tried to allocate bucket that was in btree cache");
		goto retry;
	}

	b->accessed = 1;
	b->parent = parent;
	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));

	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc(b);
	return b;
err_free:
	bch_bucket_free(c, &k.key);
err:
	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc_fail(c);
	return b;
}

static struct btree *bch_btree_node_alloc(struct cache_set *c,
					  struct btree_op *op, int level,
					  struct btree *parent)
{
	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
}

static struct btree *btree_node_alloc_replacement(struct btree *b,
						  struct btree_op *op)
{
	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);

	if (!IS_ERR_OR_NULL(n)) {
		mutex_lock(&n->write_lock);
		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
		bkey_copy_key(&n->key, &b->key);
		mutex_unlock(&n->write_lock);
	}

	return n;
}

static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
	unsigned int i;

	mutex_lock(&b->c->bucket_lock);

	atomic_inc(&b->c->prio_blocked);

	bkey_copy(k, &b->key);
	bkey_copy_key(k, &ZERO_KEY);

	for (i = 0; i < KEY_PTRS(k); i++)
		SET_PTR_GEN(k, i,
			    bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
					PTR_BUCKET(b->c, &b->key, i)));

	mutex_unlock(&b->c->bucket_lock);
}

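/*
 * Check that every cache has enough free buckets in its btree reserve for
 * an operation that may allocate nodes; if not, queue on btree_cache_wait
 * and return -EINTR so the caller retries. On success this also takes the
 * cannibalize lock.
 */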
static int btree_check_reserve(struct btree *b, struct btree_op *op)
{
	struct cache_set *c = b->c;
	struct cache *ca;
	unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;

	mutex_lock(&c->bucket_lock);

	for_each_cache(ca, c, i)
		if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
			if (op)
				prepare_to_wait(&c->btree_cache_wait, &op->wait,
						TASK_UNINTERRUPTIBLE);
			mutex_unlock(&c->bucket_lock);
			return -EINTR;
		}

	mutex_unlock(&c->bucket_lock);

	return mca_cannibalize_lock(b->c, op);
}

/* Garbage collection */

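/*
 * Mark the buckets that @k points into for garbage collection: update
 * last_gc, set the GC mark (metadata/dirty/reclaimable) and the sectors
 * used count. Returns how stale the most stale pointer is.
 */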
static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
				    struct bkey *k)
{
	uint8_t stale = 0;
	unsigned int i;
	struct bucket *g;

	/*
	 * ptr_invalid() can't return true for the keys that mark btree nodes as
	 * freed, but since ptr_bad() returns true we'll never actually use them
	 * for anything and thus we don't want to mark their pointers here
	 */
	if (!bkey_cmp(k, &ZERO_KEY))
		return stale;

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (!ptr_available(c, k, i))
			continue;

		g = PTR_BUCKET(c, k, i);

		if (gen_after(g->last_gc, PTR_GEN(k, i)))
			g->last_gc = PTR_GEN(k, i);

		if (ptr_stale(c, k, i)) {
			stale = max(stale, ptr_stale(c, k, i));
			continue;
		}

		cache_bug_on(GC_MARK(g) &&
			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
			     c, "inconsistent ptrs: mark = %llu, level = %i",
			     GC_MARK(g), level);

		if (level)
			SET_GC_MARK(g, GC_MARK_METADATA);
		else if (KEY_DIRTY(k))
			SET_GC_MARK(g, GC_MARK_DIRTY);
		else if (!GC_MARK(g))
			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);

		/* guard against overflow */
		SET_GC_SECTORS_USED(g, min_t(unsigned int,
					     GC_SECTORS_USED(g) + KEY_SIZE(k),
					     MAX_GC_SECTORS_USED));

		BUG_ON(!GC_SECTORS_USED(g));
	}

	return stale;
}

#define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)

void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i) &&
		    !ptr_stale(c, k, i)) {
			struct bucket *b = PTR_BUCKET(c, k, i);

			b->gen = PTR_GEN(k, i);

			if (level && bkey_cmp(k, &ZERO_KEY))
				b->prio = BTREE_PRIO;
			else if (!level && b->prio == BTREE_PRIO)
				b->prio = INITIAL_PRIO;
		}

	__bch_btree_mark_key(c, level, k);
}

void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
{
	stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
}

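/*
 * Mark every key in a node and gather gc statistics; returns true if the
 * node should be rewritten (gc_always_rewrite is set, pointers are too
 * stale, or more than half of the keys are bad).
 */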
Kent Overstreeta1f03582013-09-10 19:07:00 -07001271static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001272{
1273 uint8_t stale = 0;
Coly Li6f10f7d2018-08-11 13:19:44 +08001274 unsigned int keys = 0, good_keys = 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001275 struct bkey *k;
1276 struct btree_iter iter;
1277 struct bset_tree *t;
1278
1279 gc->nodes++;
1280
Kent Overstreetc052dd92013-11-11 17:35:24 -08001281 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
Kent Overstreetcafe5632013-03-23 16:11:31 -07001282 stale = max(stale, btree_mark_key(b, k));
Kent Overstreeta1f03582013-09-10 19:07:00 -07001283 keys++;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001284
Kent Overstreeta85e9682013-12-20 17:28:16 -08001285 if (bch_ptr_bad(&b->keys, k))
Kent Overstreetcafe5632013-03-23 16:11:31 -07001286 continue;
1287
Kent Overstreetcafe5632013-03-23 16:11:31 -07001288 gc->key_bytes += bkey_u64s(k);
1289 gc->nkeys++;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001290 good_keys++;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001291
1292 gc->data += KEY_SIZE(k);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001293 }
1294
Kent Overstreeta85e9682013-12-20 17:28:16 -08001295 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001296 btree_bug_on(t->size &&
Kent Overstreeta85e9682013-12-20 17:28:16 -08001297 bset_written(&b->keys, t) &&
Kent Overstreetcafe5632013-03-23 16:11:31 -07001298 bkey_cmp(&b->key, &t->end) < 0,
1299 b, "found short btree key in gc");
1300
Kent Overstreeta1f03582013-09-10 19:07:00 -07001301 if (b->c->gc_always_rewrite)
1302 return true;
1303
1304 if (stale > 10)
1305 return true;
1306
1307 if ((keys - good_keys) * 2 > keys)
1308 return true;
1309
1310 return false;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001311}
1312
Kent Overstreeta1f03582013-09-10 19:07:00 -07001313#define GC_MERGE_NODES 4U
Kent Overstreetcafe5632013-03-23 16:11:31 -07001314
1315struct gc_merge_info {
1316 struct btree *b;
Coly Li6f10f7d2018-08-11 13:19:44 +08001317 unsigned int keys;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001318};
1319
Coly Lifc2d5982018-08-11 13:19:46 +08001320static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1321 struct keylist *insert_keys,
1322 atomic_t *journal_ref,
1323 struct bkey *replace_key);
Kent Overstreetb54d6932013-07-24 18:04:18 -07001324
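/*
 * Try to coalesce up to GC_MERGE_NODES adjacent sibling nodes into fewer
 * nodes when their combined keys would fit: keys are shifted towards the
 * last node, the emptied node is freed, and the new node keys are
 * inserted into the parent via bch_btree_insert_node().  Returns -EINTR
 * (iterator invalidated) on success, 0 if nothing was done.
 */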
Kent Overstreeta1f03582013-09-10 19:07:00 -07001325static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
Kent Overstreet0a63b662014-03-17 17:15:53 -07001326 struct gc_stat *gc, struct gc_merge_info *r)
Kent Overstreeta1f03582013-09-10 19:07:00 -07001327{
Coly Li6f10f7d2018-08-11 13:19:44 +08001328 unsigned int i, nodes = 0, keys = 0, blocks;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001329 struct btree *new_nodes[GC_MERGE_NODES];
Kent Overstreet0a63b662014-03-17 17:15:53 -07001330 struct keylist keylist;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001331 struct closure cl;
1332 struct bkey *k;
1333
Kent Overstreet0a63b662014-03-17 17:15:53 -07001334 bch_keylist_init(&keylist);
1335
1336 if (btree_check_reserve(b, NULL))
1337 return 0;
1338
Kent Overstreeta1f03582013-09-10 19:07:00 -07001339 memset(new_nodes, 0, sizeof(new_nodes));
Kent Overstreetb54d6932013-07-24 18:04:18 -07001340 closure_init_stack(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001341
Kent Overstreeta1f03582013-09-10 19:07:00 -07001342 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
Kent Overstreetcafe5632013-03-23 16:11:31 -07001343 keys += r[nodes++].keys;
1344
1345 blocks = btree_default_blocks(b->c) * 2 / 3;
1346
1347 if (nodes < 2 ||
Kent Overstreeta85e9682013-12-20 17:28:16 -08001348 __set_blocks(b->keys.set[0].data, keys,
Kent Overstreetee811282013-12-17 23:49:49 -08001349 block_bytes(b->c)) > blocks * (nodes - 1))
Kent Overstreeta1f03582013-09-10 19:07:00 -07001350 return 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001351
Kent Overstreeta1f03582013-09-10 19:07:00 -07001352 for (i = 0; i < nodes; i++) {
Kent Overstreet0a63b662014-03-17 17:15:53 -07001353 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
Kent Overstreeta1f03582013-09-10 19:07:00 -07001354 if (IS_ERR_OR_NULL(new_nodes[i]))
1355 goto out_nocoalesce;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001356 }
1357
Kent Overstreet0a63b662014-03-17 17:15:53 -07001358 /*
1359 * We have to check the reserve here, after we've allocated our new
1360 * nodes, to make sure the insert below will succeed - we also check
1361 * before as an optimization to potentially avoid a bunch of expensive
1362 * allocs/sorts
1363 */
1364 if (btree_check_reserve(b, NULL))
1365 goto out_nocoalesce;
1366
Kent Overstreet2a285682014-03-04 16:42:42 -08001367 for (i = 0; i < nodes; i++)
1368 mutex_lock(&new_nodes[i]->write_lock);
1369
Kent Overstreetcafe5632013-03-23 16:11:31 -07001370 for (i = nodes - 1; i > 0; --i) {
Kent Overstreetee811282013-12-17 23:49:49 -08001371 struct bset *n1 = btree_bset_first(new_nodes[i]);
1372 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001373 struct bkey *k, *last = NULL;
1374
1375 keys = 0;
1376
Kent Overstreeta1f03582013-09-10 19:07:00 -07001377 if (i > 1) {
Kent Overstreetcafe5632013-03-23 16:11:31 -07001378 for (k = n2->start;
Kent Overstreetfafff812013-12-17 21:56:21 -08001379 k < bset_bkey_last(n2);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001380 k = bkey_next(k)) {
1381 if (__set_blocks(n1, n1->keys + keys +
Kent Overstreetee811282013-12-17 23:49:49 -08001382 bkey_u64s(k),
1383 block_bytes(b->c)) > blocks)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001384 break;
1385
1386 last = k;
1387 keys += bkey_u64s(k);
1388 }
Kent Overstreeta1f03582013-09-10 19:07:00 -07001389 } else {
1390 /*
1391 * Last node we're not getting rid of - we're getting
1392 * rid of the node at r[0]. Have to try and fit all of
1393 * the remaining keys into this node; we can't ensure
1394 * they will always fit due to rounding and variable
1395 * length keys (shouldn't be possible in practice,
1396 * though)
1397 */
1398 if (__set_blocks(n1, n1->keys + n2->keys,
Kent Overstreetee811282013-12-17 23:49:49 -08001399 block_bytes(b->c)) >
1400 btree_blocks(new_nodes[i]))
Kent Overstreeta1f03582013-09-10 19:07:00 -07001401 goto out_nocoalesce;
1402
1403 keys = n2->keys;
1404 /* Take the key of the node we're getting rid of */
1405 last = &r->b->key;
1406 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001407
Kent Overstreetee811282013-12-17 23:49:49 -08001408 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1409 btree_blocks(new_nodes[i]));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001410
Kent Overstreeta1f03582013-09-10 19:07:00 -07001411 if (last)
1412 bkey_copy_key(&new_nodes[i]->key, last);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001413
Kent Overstreetfafff812013-12-17 21:56:21 -08001414 memcpy(bset_bkey_last(n1),
Kent Overstreetcafe5632013-03-23 16:11:31 -07001415 n2->start,
Kent Overstreetfafff812013-12-17 21:56:21 -08001416 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001417
1418 n1->keys += keys;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001419 r[i].keys = n1->keys;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001420
1421 memmove(n2->start,
Kent Overstreetfafff812013-12-17 21:56:21 -08001422 bset_bkey_idx(n2, keys),
1423 (void *) bset_bkey_last(n2) -
1424 (void *) bset_bkey_idx(n2, keys));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001425
1426 n2->keys -= keys;
1427
Kent Overstreet0a63b662014-03-17 17:15:53 -07001428 if (__bch_keylist_realloc(&keylist,
Kent Overstreet085d2a32013-11-11 18:20:51 -08001429 bkey_u64s(&new_nodes[i]->key)))
Kent Overstreeta1f03582013-09-10 19:07:00 -07001430 goto out_nocoalesce;
1431
1432 bch_btree_node_write(new_nodes[i], &cl);
Kent Overstreet0a63b662014-03-17 17:15:53 -07001433 bch_keylist_add(&keylist, &new_nodes[i]->key);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001434 }
1435
Kent Overstreet2a285682014-03-04 16:42:42 -08001436 for (i = 0; i < nodes; i++)
1437 mutex_unlock(&new_nodes[i]->write_lock);
1438
Kent Overstreet05335cf2014-03-17 18:22:34 -07001439 closure_sync(&cl);
1440
1441 /* We emptied out this node */
1442 BUG_ON(btree_bset_first(new_nodes[0])->keys);
1443 btree_node_free(new_nodes[0]);
1444 rw_unlock(true, new_nodes[0]);
Slava Pestov400ffaa2014-07-12 21:53:11 -07001445 new_nodes[0] = NULL;
Kent Overstreet05335cf2014-03-17 18:22:34 -07001446
Kent Overstreeta1f03582013-09-10 19:07:00 -07001447 for (i = 0; i < nodes; i++) {
Kent Overstreet0a63b662014-03-17 17:15:53 -07001448 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
Kent Overstreeta1f03582013-09-10 19:07:00 -07001449 goto out_nocoalesce;
1450
Kent Overstreet0a63b662014-03-17 17:15:53 -07001451 make_btree_freeing_key(r[i].b, keylist.top);
1452 bch_keylist_push(&keylist);
Kent Overstreeta1f03582013-09-10 19:07:00 -07001453 }
1454
Kent Overstreet0a63b662014-03-17 17:15:53 -07001455 bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1456 BUG_ON(!bch_keylist_empty(&keylist));
Kent Overstreeta1f03582013-09-10 19:07:00 -07001457
1458 for (i = 0; i < nodes; i++) {
1459 btree_node_free(r[i].b);
1460 rw_unlock(true, r[i].b);
1461
1462 r[i].b = new_nodes[i];
1463 }
1464
Kent Overstreeta1f03582013-09-10 19:07:00 -07001465 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1466 r[nodes - 1].b = ERR_PTR(-EINTR);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001467
Kent Overstreetc37511b2013-04-26 15:39:55 -07001468 trace_bcache_btree_gc_coalesce(nodes);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001469 gc->nodes--;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001470
Kent Overstreet0a63b662014-03-17 17:15:53 -07001471 bch_keylist_free(&keylist);
1472
Kent Overstreeta1f03582013-09-10 19:07:00 -07001473 /* Invalidated our iterator */
1474 return -EINTR;
1475
1476out_nocoalesce:
1477 closure_sync(&cl);
Kent Overstreet0a63b662014-03-17 17:15:53 -07001478 bch_keylist_free(&keylist);
Kent Overstreeta1f03582013-09-10 19:07:00 -07001479
Kent Overstreet0a63b662014-03-17 17:15:53 -07001480 while ((k = bch_keylist_pop(&keylist)))
Kent Overstreeta1f03582013-09-10 19:07:00 -07001481 if (!bkey_cmp(k, &ZERO_KEY))
1482 atomic_dec(&b->c->prio_blocked);
1483
1484 for (i = 0; i < nodes; i++)
1485 if (!IS_ERR_OR_NULL(new_nodes[i])) {
1486 btree_node_free(new_nodes[i]);
1487 rw_unlock(true, new_nodes[i]);
1488 }
1489 return 0;
1490}
1491
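/*
 * Rewrite a single btree node into a freshly allocated replacement and
 * insert the replacement key into the parent.  Returns -EINTR since the
 * caller's iterator is invalidated, or 0 if the reserve check failed.
 */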
Kent Overstreet0a63b662014-03-17 17:15:53 -07001492static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1493 struct btree *replace)
1494{
1495 struct keylist keys;
1496 struct btree *n;
1497
1498 if (btree_check_reserve(b, NULL))
1499 return 0;
1500
1501 n = btree_node_alloc_replacement(replace, NULL);
1502
1503 /* recheck reserve after allocating replacement node */
1504 if (btree_check_reserve(b, NULL)) {
1505 btree_node_free(n);
1506 rw_unlock(true, n);
1507 return 0;
1508 }
1509
1510 bch_btree_node_write_sync(n);
1511
1512 bch_keylist_init(&keys);
1513 bch_keylist_add(&keys, &n->key);
1514
1515 make_btree_freeing_key(replace, keys.top);
1516 bch_keylist_push(&keys);
1517
1518 bch_btree_insert_node(b, op, &keys, NULL, NULL);
1519 BUG_ON(!bch_keylist_empty(&keys));
1520
1521 btree_node_free(replace);
1522 rw_unlock(true, n);
1523
1524 /* Invalidated our iterator */
1525 return -EINTR;
1526}
1527
Coly Li6f10f7d2018-08-11 13:19:44 +08001528static unsigned int btree_gc_count_keys(struct btree *b)
Kent Overstreeta1f03582013-09-10 19:07:00 -07001529{
1530 struct bkey *k;
1531 struct btree_iter iter;
Coly Li6f10f7d2018-08-11 13:19:44 +08001532 unsigned int ret = 0;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001533
Kent Overstreetc052dd92013-11-11 17:35:24 -08001534 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
Kent Overstreeta1f03582013-09-10 19:07:00 -07001535 ret += bkey_u64s(k);
1536
1537 return ret;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001538}
1539
Tang Junhui7f4a59d2018-07-26 12:17:35 +08001540static size_t btree_gc_min_nodes(struct cache_set *c)
1541{
1542 size_t min_nodes;
1543
1544 /*
1545 	 * Incremental GC pauses for 100ms whenever front-side
1546 	 * I/O arrives, so if GC only processed a constant
1547 	 * number (100) of nodes per pass, a large btree would
1548 	 * take a long time to collect and front-side I/Os
1549 	 * would run out of buckets (since no new bucket can
1550 	 * be allocated during GC) and stall again.  So GC
1551 	 * should not process a constant number of nodes, but
1552 	 * a number scaled to the size of the btree: divide GC
1553 	 * into a constant (100) number of passes, so that
1554 	 * when there are many btree nodes each pass processes
1555 	 * more of them, and otherwise fewer (but never less
1556 	 * than MIN_GC_NODES).
1557 */
1558 min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
1559 if (min_nodes < MIN_GC_NODES)
1560 min_nodes = MIN_GC_NODES;
1561
1562 return min_nodes;
1563}
1564
1565
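/*
 * Walk the children of an interior node, marking, coalescing and
 * rewriting as we go and recursing into interior children.  Progress is
 * recorded in c->gc_done so GC can resume after -EAGAIN, which is
 * returned to yield to front-end I/O or when rescheduling is needed.
 */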
Kent Overstreetcafe5632013-03-23 16:11:31 -07001566static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1567 struct closure *writes, struct gc_stat *gc)
1568{
Kent Overstreeta1f03582013-09-10 19:07:00 -07001569 int ret = 0;
1570 bool should_rewrite;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001571 struct bkey *k;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001572 struct btree_iter iter;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001573 struct gc_merge_info r[GC_MERGE_NODES];
Kent Overstreet2a285682014-03-04 16:42:42 -08001574 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001575
Kent Overstreetc052dd92013-11-11 17:35:24 -08001576 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001577
Kent Overstreet2a285682014-03-04 16:42:42 -08001578 for (i = r; i < r + ARRAY_SIZE(r); i++)
1579 i->b = ERR_PTR(-EINTR);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001580
Kent Overstreeta1f03582013-09-10 19:07:00 -07001581 while (1) {
Kent Overstreeta85e9682013-12-20 17:28:16 -08001582 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
Kent Overstreeta1f03582013-09-10 19:07:00 -07001583 if (k) {
Kent Overstreet0a63b662014-03-17 17:15:53 -07001584 r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
Slava Pestov2452cc82014-07-12 00:22:53 -07001585 true, b);
Kent Overstreeta1f03582013-09-10 19:07:00 -07001586 if (IS_ERR(r->b)) {
1587 ret = PTR_ERR(r->b);
1588 break;
1589 }
1590
1591 r->keys = btree_gc_count_keys(r->b);
1592
Kent Overstreet0a63b662014-03-17 17:15:53 -07001593 ret = btree_gc_coalesce(b, op, gc, r);
Kent Overstreeta1f03582013-09-10 19:07:00 -07001594 if (ret)
1595 break;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001596 }
1597
Kent Overstreeta1f03582013-09-10 19:07:00 -07001598 if (!last->b)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001599 break;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001600
1601 if (!IS_ERR(last->b)) {
1602 should_rewrite = btree_gc_mark_node(last->b, gc);
Kent Overstreet0a63b662014-03-17 17:15:53 -07001603 if (should_rewrite) {
1604 ret = btree_gc_rewrite_node(b, op, last->b);
1605 if (ret)
Kent Overstreeta1f03582013-09-10 19:07:00 -07001606 break;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001607 }
1608
1609 if (last->b->level) {
1610 ret = btree_gc_recurse(last->b, op, writes, gc);
1611 if (ret)
1612 break;
1613 }
1614
1615 bkey_copy_key(&b->c->gc_done, &last->b->key);
1616
1617 /*
1618 * Must flush leaf nodes before gc ends, since replace
1619 * operations aren't journalled
1620 */
Kent Overstreet2a285682014-03-04 16:42:42 -08001621 mutex_lock(&last->b->write_lock);
Kent Overstreeta1f03582013-09-10 19:07:00 -07001622 if (btree_node_dirty(last->b))
1623 bch_btree_node_write(last->b, writes);
Kent Overstreet2a285682014-03-04 16:42:42 -08001624 mutex_unlock(&last->b->write_lock);
Kent Overstreeta1f03582013-09-10 19:07:00 -07001625 rw_unlock(true, last->b);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001626 }
1627
Kent Overstreeta1f03582013-09-10 19:07:00 -07001628 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1629 r->b = NULL;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001630
Tang Junhui5c25c4f2018-07-26 12:17:34 +08001631 if (atomic_read(&b->c->search_inflight) &&
Tang Junhui7f4a59d2018-07-26 12:17:35 +08001632 gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
Tang Junhui5c25c4f2018-07-26 12:17:34 +08001633 gc->nodes_pre = gc->nodes;
1634 ret = -EAGAIN;
1635 break;
1636 }
1637
Kent Overstreetcafe5632013-03-23 16:11:31 -07001638 if (need_resched()) {
1639 ret = -EAGAIN;
1640 break;
1641 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001642 }
1643
Kent Overstreet2a285682014-03-04 16:42:42 -08001644 for (i = r; i < r + ARRAY_SIZE(r); i++)
1645 if (!IS_ERR_OR_NULL(i->b)) {
1646 mutex_lock(&i->b->write_lock);
1647 if (btree_node_dirty(i->b))
1648 bch_btree_node_write(i->b, writes);
1649 mutex_unlock(&i->b->write_lock);
1650 rw_unlock(true, i->b);
Kent Overstreeta1f03582013-09-10 19:07:00 -07001651 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001652
Kent Overstreetcafe5632013-03-23 16:11:31 -07001653 return ret;
1654}
1655
1656static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1657 struct closure *writes, struct gc_stat *gc)
1658{
1659 struct btree *n = NULL;
Kent Overstreeta1f03582013-09-10 19:07:00 -07001660 int ret = 0;
1661 bool should_rewrite;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001662
Kent Overstreeta1f03582013-09-10 19:07:00 -07001663 should_rewrite = btree_gc_mark_node(b, gc);
1664 if (should_rewrite) {
Kent Overstreet0a63b662014-03-17 17:15:53 -07001665 n = btree_node_alloc_replacement(b, NULL);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001666
Kent Overstreeta1f03582013-09-10 19:07:00 -07001667 if (!IS_ERR_OR_NULL(n)) {
1668 bch_btree_node_write_sync(n);
Kent Overstreet2a285682014-03-04 16:42:42 -08001669
Kent Overstreeta1f03582013-09-10 19:07:00 -07001670 bch_btree_set_root(n);
1671 btree_node_free(b);
1672 rw_unlock(true, n);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001673
Kent Overstreeta1f03582013-09-10 19:07:00 -07001674 return -EINTR;
1675 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001676 }
1677
Kent Overstreet487dded2014-03-17 15:13:26 -07001678 __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1679
Kent Overstreeta1f03582013-09-10 19:07:00 -07001680 if (b->level) {
1681 ret = btree_gc_recurse(b, op, writes, gc);
1682 if (ret)
1683 return ret;
1684 }
1685
1686 bkey_copy_key(&b->c->gc_done, &b->key);
1687
Kent Overstreetcafe5632013-03-23 16:11:31 -07001688 return ret;
1689}
1690
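/*
 * Reset per-bucket GC state (last_gc, marks, sector counts) and clear
 * gc_mark_valid before a new GC pass begins.
 */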
1691static void btree_gc_start(struct cache_set *c)
1692{
1693 struct cache *ca;
1694 struct bucket *b;
Coly Li6f10f7d2018-08-11 13:19:44 +08001695 unsigned int i;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001696
1697 if (!c->gc_mark_valid)
1698 return;
1699
1700 mutex_lock(&c->bucket_lock);
1701
1702 c->gc_mark_valid = 0;
1703 c->gc_done = ZERO_KEY;
1704
1705 for_each_cache(ca, c, i)
1706 for_each_bucket(b, ca) {
Kent Overstreet3a2fd9d2014-02-27 17:51:12 -08001707 b->last_gc = b->gen;
Kent Overstreet29ebf462013-07-11 19:43:21 -07001708 if (!atomic_read(&b->pin)) {
Kent Overstreet4fe6a812014-03-13 13:46:29 -07001709 SET_GC_MARK(b, 0);
Kent Overstreet29ebf462013-07-11 19:43:21 -07001710 SET_GC_SECTORS_USED(b, 0);
1711 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001712 }
1713
Kent Overstreetcafe5632013-03-23 16:11:31 -07001714 mutex_unlock(&c->bucket_lock);
1715}
1716
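/*
 * After the btree has been walked: re-mark the metadata buckets (uuid,
 * superblock-listed and prio buckets), keep buckets referenced by
 * outstanding writeback keys marked dirty, and recount the available
 * buckets.
 */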
Tang Junhuid44c2f92017-10-30 14:46:33 -07001717static void bch_btree_gc_finish(struct cache_set *c)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001718{
Kent Overstreetcafe5632013-03-23 16:11:31 -07001719 struct bucket *b;
1720 struct cache *ca;
Coly Li6f10f7d2018-08-11 13:19:44 +08001721 unsigned int i;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001722
1723 mutex_lock(&c->bucket_lock);
1724
1725 set_gc_sectors(c);
1726 c->gc_mark_valid = 1;
1727 c->need_gc = 0;
1728
Kent Overstreetcafe5632013-03-23 16:11:31 -07001729 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1730 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1731 GC_MARK_METADATA);
1732
Nicholas Swensonbf0a6282013-11-26 19:14:23 -08001733 /* don't reclaim buckets to which writeback keys point */
1734 rcu_read_lock();
Coly Li28312312018-01-08 12:21:28 -08001735 for (i = 0; i < c->devices_max_used; i++) {
Nicholas Swensonbf0a6282013-11-26 19:14:23 -08001736 struct bcache_device *d = c->devices[i];
1737 struct cached_dev *dc;
1738 struct keybuf_key *w, *n;
Coly Li6f10f7d2018-08-11 13:19:44 +08001739 unsigned int j;
Nicholas Swensonbf0a6282013-11-26 19:14:23 -08001740
1741 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1742 continue;
1743 dc = container_of(d, struct cached_dev, disk);
1744
1745 spin_lock(&dc->writeback_keys.lock);
1746 rbtree_postorder_for_each_entry_safe(w, n,
1747 &dc->writeback_keys.keys, node)
1748 for (j = 0; j < KEY_PTRS(&w->key); j++)
1749 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1750 GC_MARK_DIRTY);
1751 spin_unlock(&dc->writeback_keys.lock);
1752 }
1753 rcu_read_unlock();
1754
Tang Junhuid44c2f92017-10-30 14:46:33 -07001755 c->avail_nbuckets = 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001756 for_each_cache(ca, c, i) {
1757 uint64_t *i;
1758
1759 ca->invalidate_needs_gc = 0;
1760
1761 for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1762 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1763
1764 for (i = ca->prio_buckets;
1765 i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1766 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1767
1768 for_each_bucket(b, ca) {
Kent Overstreetcafe5632013-03-23 16:11:31 -07001769 c->need_gc = max(c->need_gc, bucket_gc_gen(b));
1770
Kent Overstreet4fe6a812014-03-13 13:46:29 -07001771 if (atomic_read(&b->pin))
1772 continue;
1773
1774 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1775
1776 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
Tang Junhuid44c2f92017-10-30 14:46:33 -07001777 c->avail_nbuckets++;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001778 }
1779 }
1780
Kent Overstreetcafe5632013-03-23 16:11:31 -07001781 mutex_unlock(&c->bucket_lock);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001782}
1783
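/*
 * One full garbage collection pass: mark the whole btree from the root,
 * retrying with a short sleep whenever the walk returns -EAGAIN, then
 * finish up, wake the allocators and kick off moving GC.
 */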
Kent Overstreet72a44512013-10-24 17:19:26 -07001784static void bch_btree_gc(struct cache_set *c)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001785{
Kent Overstreetcafe5632013-03-23 16:11:31 -07001786 int ret;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001787 struct gc_stat stats;
1788 struct closure writes;
1789 struct btree_op op;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001790 uint64_t start_time = local_clock();
Kent Overstreet57943512013-04-25 13:58:35 -07001791
Kent Overstreetc37511b2013-04-26 15:39:55 -07001792 trace_bcache_gc_start(c);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001793
1794 memset(&stats, 0, sizeof(struct gc_stat));
1795 closure_init_stack(&writes);
Kent Overstreetb54d6932013-07-24 18:04:18 -07001796 bch_btree_op_init(&op, SHRT_MAX);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001797
1798 btree_gc_start(c);
1799
Coly Li771f3932018-03-18 17:36:17 -07001800 /* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
Kent Overstreeta1f03582013-09-10 19:07:00 -07001801 do {
1802 ret = btree_root(gc_root, c, &op, &writes, &stats);
1803 closure_sync(&writes);
Kent Overstreetc5f1e5a2015-11-29 17:18:33 -08001804 cond_resched();
Kent Overstreet57943512013-04-25 13:58:35 -07001805
Tang Junhui5c25c4f2018-07-26 12:17:34 +08001806 if (ret == -EAGAIN)
1807 schedule_timeout_interruptible(msecs_to_jiffies
1808 (GC_SLEEP_MS));
1809 else if (ret)
Kent Overstreeta1f03582013-09-10 19:07:00 -07001810 pr_warn("gc failed!");
Coly Li771f3932018-03-18 17:36:17 -07001811 } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001812
Tang Junhuid44c2f92017-10-30 14:46:33 -07001813 bch_btree_gc_finish(c);
Kent Overstreet57943512013-04-25 13:58:35 -07001814 wake_up_allocators(c);
1815
Kent Overstreet169ef1c2013-03-28 12:50:55 -06001816 bch_time_stats_update(&c->btree_gc_time, start_time);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001817
1818 stats.key_bytes *= sizeof(uint64_t);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001819 stats.data <<= 9;
Tang Junhuid44c2f92017-10-30 14:46:33 -07001820 bch_update_bucket_in_use(c, &stats);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001821 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001822
Kent Overstreetc37511b2013-04-26 15:39:55 -07001823 trace_bcache_gc_end(c);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001824
Kent Overstreet72a44512013-10-24 17:19:26 -07001825 bch_moving_gc(c);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001826}
1827
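/*
 * GC runs from a dedicated kthread woken via c->gc_wait; it should run
 * when any cache needs buckets invalidated or once enough sectors have
 * been written since the last pass.
 */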
Kent Overstreetbe628be2016-10-26 20:31:17 -07001828static bool gc_should_run(struct cache_set *c)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001829{
Kent Overstreeta1f03582013-09-10 19:07:00 -07001830 struct cache *ca;
Coly Li6f10f7d2018-08-11 13:19:44 +08001831 unsigned int i;
Kent Overstreet72a44512013-10-24 17:19:26 -07001832
Kent Overstreetbe628be2016-10-26 20:31:17 -07001833 for_each_cache(ca, c, i)
1834 if (ca->invalidate_needs_gc)
1835 return true;
Kent Overstreet72a44512013-10-24 17:19:26 -07001836
Kent Overstreetbe628be2016-10-26 20:31:17 -07001837 if (atomic_read(&c->sectors_to_gc) < 0)
1838 return true;
1839
1840 return false;
1841}
1842
1843static int bch_gc_thread(void *arg)
1844{
1845 struct cache_set *c = arg;
1846
1847 while (1) {
1848 wait_event_interruptible(c->gc_wait,
Coly Li771f3932018-03-18 17:36:17 -07001849 kthread_should_stop() ||
1850 test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1851 gc_should_run(c));
Kent Overstreetbe628be2016-10-26 20:31:17 -07001852
Coly Li771f3932018-03-18 17:36:17 -07001853 if (kthread_should_stop() ||
1854 test_bit(CACHE_SET_IO_DISABLE, &c->flags))
Kent Overstreet72a44512013-10-24 17:19:26 -07001855 break;
1856
Kent Overstreetbe628be2016-10-26 20:31:17 -07001857 set_gc_sectors(c);
1858 bch_btree_gc(c);
Kent Overstreet72a44512013-10-24 17:19:26 -07001859 }
1860
Coly Li771f3932018-03-18 17:36:17 -07001861 wait_for_kthread_stop();
Kent Overstreet72a44512013-10-24 17:19:26 -07001862 return 0;
1863}
1864
1865int bch_gc_thread_start(struct cache_set *c)
1866{
Kent Overstreetbe628be2016-10-26 20:31:17 -07001867 c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
Vasyl Gomonovych9d134112018-01-08 12:21:20 -08001868 return PTR_ERR_OR_ZERO(c->gc_thread);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001869}
1870
1871/* Initial partial gc */
1872
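/*
 * Walk the whole btree at startup, marking every key (and prefetching
 * child nodes) so bucket gens and GC marks are valid before the
 * allocator starts.
 */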
Kent Overstreet487dded2014-03-17 15:13:26 -07001873static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001874{
Kent Overstreet50310162013-09-10 17:18:59 -07001875 int ret = 0;
Kent Overstreet50310162013-09-10 17:18:59 -07001876 struct bkey *k, *p = NULL;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001877 struct btree_iter iter;
1878
Kent Overstreet487dded2014-03-17 15:13:26 -07001879 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1880 bch_initial_mark_key(b->c, b->level, k);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001881
Kent Overstreet487dded2014-03-17 15:13:26 -07001882 bch_initial_mark_key(b->c, b->level + 1, &b->key);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001883
1884 if (b->level) {
Kent Overstreetc052dd92013-11-11 17:35:24 -08001885 bch_btree_iter_init(&b->keys, &iter, NULL);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001886
Kent Overstreet50310162013-09-10 17:18:59 -07001887 do {
Kent Overstreeta85e9682013-12-20 17:28:16 -08001888 k = bch_btree_iter_next_filter(&iter, &b->keys,
1889 bch_ptr_bad);
Tang Junhui7f4a59d2018-07-26 12:17:35 +08001890 if (k) {
Slava Pestov2452cc82014-07-12 00:22:53 -07001891 btree_node_prefetch(b, k);
Tang Junhui7f4a59d2018-07-26 12:17:35 +08001892 /*
1893 				 * initialize c->gc_stats.nodes
1894 * for incremental GC
1895 */
1896 b->c->gc_stats.nodes++;
1897 }
Kent Overstreet50310162013-09-10 17:18:59 -07001898
Kent Overstreetcafe5632013-03-23 16:11:31 -07001899 if (p)
Kent Overstreet487dded2014-03-17 15:13:26 -07001900 ret = btree(check_recurse, p, b, op);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001901
Kent Overstreet50310162013-09-10 17:18:59 -07001902 p = k;
1903 } while (p && !ret);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001904 }
1905
Kent Overstreet487dded2014-03-17 15:13:26 -07001906 return ret;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001907}
1908
Kent Overstreetc18536a2013-07-24 17:44:17 -07001909int bch_btree_check(struct cache_set *c)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001910{
Kent Overstreetc18536a2013-07-24 17:44:17 -07001911 struct btree_op op;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001912
Kent Overstreetb54d6932013-07-24 18:04:18 -07001913 bch_btree_op_init(&op, SHRT_MAX);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001914
Kent Overstreet487dded2014-03-17 15:13:26 -07001915 return btree_root(check_recurse, c, &op);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001916}
1917
Kent Overstreet2531d9ee2014-03-17 16:55:55 -07001918void bch_initial_gc_finish(struct cache_set *c)
1919{
1920 struct cache *ca;
1921 struct bucket *b;
Coly Li6f10f7d2018-08-11 13:19:44 +08001922 unsigned int i;
Kent Overstreet2531d9ee2014-03-17 16:55:55 -07001923
1924 bch_btree_gc_finish(c);
1925
1926 mutex_lock(&c->bucket_lock);
1927
1928 /*
1929 * We need to put some unused buckets directly on the prio freelist in
1930 * order to get the allocator thread started - it needs freed buckets in
1931 * order to rewrite the prios and gens, and it needs to rewrite prios
1932 * and gens in order to free buckets.
1933 *
1934 * This is only safe for buckets that have no live data in them, which
1935 * there should always be some of.
1936 */
1937 for_each_cache(ca, c, i) {
1938 for_each_bucket(b, ca) {
Tang Junhui682811b2018-02-07 11:41:43 -08001939 if (fifo_full(&ca->free[RESERVE_PRIO]) &&
1940 fifo_full(&ca->free[RESERVE_BTREE]))
Kent Overstreet2531d9ee2014-03-17 16:55:55 -07001941 break;
1942
1943 if (bch_can_invalidate_bucket(ca, b) &&
1944 !GC_MARK(b)) {
1945 __bch_invalidate_one_bucket(ca, b);
Tang Junhui682811b2018-02-07 11:41:43 -08001946 if (!fifo_push(&ca->free[RESERVE_PRIO],
1947 b - ca->buckets))
1948 fifo_push(&ca->free[RESERVE_BTREE],
1949 b - ca->buckets);
Kent Overstreet2531d9ee2014-03-17 16:55:55 -07001950 }
1951 }
1952 }
1953
1954 mutex_unlock(&c->bucket_lock);
1955}
1956
Kent Overstreetcafe5632013-03-23 16:11:31 -07001957/* Btree insertion */
1958
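/*
 * Insert a single key into an in-memory btree node via the bset layer;
 * returns false if nothing was inserted (BTREE_INSERT_STATUS_NO_INSERT).
 */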
Kent Overstreet829a60b2013-11-11 17:02:31 -08001959static bool btree_insert_key(struct btree *b, struct bkey *k,
1960 struct bkey *replace_key)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001961{
Coly Li6f10f7d2018-08-11 13:19:44 +08001962 unsigned int status;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001963
1964 BUG_ON(bkey_cmp(k, &b->key) > 0);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001965
Kent Overstreet829a60b2013-11-11 17:02:31 -08001966 status = bch_btree_insert_key(&b->keys, k, replace_key);
1967 if (status != BTREE_INSERT_STATUS_NO_INSERT) {
1968 bch_check_keys(&b->keys, "%u for %s", status,
1969 replace_key ? "replace" : "insert");
Kent Overstreetcafe5632013-03-23 16:11:31 -07001970
Kent Overstreet829a60b2013-11-11 17:02:31 -08001971 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
1972 status);
1973 return true;
1974 } else
1975 return false;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001976}
1977
Kent Overstreet59158fd2013-11-11 19:03:54 -08001978static size_t insert_u64s_remaining(struct btree *b)
1979{
Kent Overstreet35723242014-01-10 18:53:02 -08001980 long ret = bch_btree_keys_u64s_remaining(&b->keys);
Kent Overstreet59158fd2013-11-11 19:03:54 -08001981
1982 /*
1983 * Might land in the middle of an existing extent and have to split it
1984 */
1985 if (b->keys.ops->is_extents)
1986 ret -= KEY_MAX_U64S;
1987
1988 return max(ret, 0L);
1989}
1990
Kent Overstreet26c949f2013-09-10 18:41:15 -07001991static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
Kent Overstreet1b207d82013-09-10 18:52:54 -07001992 struct keylist *insert_keys,
1993 struct bkey *replace_key)
Kent Overstreetcafe5632013-03-23 16:11:31 -07001994{
1995 bool ret = false;
Kent Overstreetdc9d98d2013-12-17 23:47:33 -08001996 int oldsize = bch_count_data(&b->keys);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001997
Kent Overstreet26c949f2013-09-10 18:41:15 -07001998 while (!bch_keylist_empty(insert_keys)) {
Kent Overstreetc2f95ae2013-07-24 17:24:25 -07001999 struct bkey *k = insert_keys->keys;
Kent Overstreet26c949f2013-09-10 18:41:15 -07002000
Kent Overstreet59158fd2013-11-11 19:03:54 -08002001 if (bkey_u64s(k) > insert_u64s_remaining(b))
Kent Overstreet403b6cd2013-07-24 17:22:44 -07002002 break;
2003
2004 if (bkey_cmp(k, &b->key) <= 0) {
Kent Overstreet3a3b6a42013-07-24 16:46:42 -07002005 if (!b->level)
2006 bkey_put(b->c, k);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002007
Kent Overstreet829a60b2013-11-11 17:02:31 -08002008 ret |= btree_insert_key(b, k, replace_key);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002009 bch_keylist_pop_front(insert_keys);
2010 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
Kent Overstreet26c949f2013-09-10 18:41:15 -07002011 BKEY_PADDED(key) temp;
Kent Overstreetc2f95ae2013-07-24 17:24:25 -07002012 bkey_copy(&temp.key, insert_keys->keys);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002013
2014 bch_cut_back(&b->key, &temp.key);
Kent Overstreetc2f95ae2013-07-24 17:24:25 -07002015 bch_cut_front(&b->key, insert_keys->keys);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002016
Kent Overstreet829a60b2013-11-11 17:02:31 -08002017 ret |= btree_insert_key(b, &temp.key, replace_key);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002018 break;
2019 } else {
2020 break;
2021 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07002022 }
2023
Kent Overstreet829a60b2013-11-11 17:02:31 -08002024 if (!ret)
2025 op->insert_collision = true;
2026
Kent Overstreet403b6cd2013-07-24 17:22:44 -07002027 BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2028
Kent Overstreetdc9d98d2013-12-17 23:47:33 -08002029 BUG_ON(bch_count_data(&b->keys) < oldsize);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002030 return ret;
2031}
2032
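/*
 * Split (or just rewrite) a btree node that can't fit an insertion:
 * allocate one or two replacement nodes, distribute the keys roughly 3:2
 * when splitting, insert the new keys, and push the new node keys up
 * into the parent (allocating a new root if the depth grows).
 */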
Kent Overstreet26c949f2013-09-10 18:41:15 -07002033static int btree_split(struct btree *b, struct btree_op *op,
2034 struct keylist *insert_keys,
Kent Overstreet1b207d82013-09-10 18:52:54 -07002035 struct bkey *replace_key)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002036{
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07002037 bool split;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002038 struct btree *n1, *n2 = NULL, *n3 = NULL;
2039 uint64_t start_time = local_clock();
Kent Overstreetb54d6932013-07-24 18:04:18 -07002040 struct closure cl;
Kent Overstreet17e21a92013-07-26 12:32:38 -07002041 struct keylist parent_keys;
Kent Overstreetb54d6932013-07-24 18:04:18 -07002042
2043 closure_init_stack(&cl);
Kent Overstreet17e21a92013-07-26 12:32:38 -07002044 bch_keylist_init(&parent_keys);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002045
Kent Overstreet0a63b662014-03-17 17:15:53 -07002046 if (btree_check_reserve(b, op)) {
2047 if (!b->level)
2048 return -EINTR;
2049 else
2050 WARN(1, "insufficient reserve for split\n");
2051 }
Kent Overstreet78365412013-12-17 01:29:34 -08002052
Kent Overstreet0a63b662014-03-17 17:15:53 -07002053 n1 = btree_node_alloc_replacement(b, op);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002054 if (IS_ERR(n1))
2055 goto err;
2056
Kent Overstreetee811282013-12-17 23:49:49 -08002057 split = set_blocks(btree_bset_first(n1),
2058 block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002059
Kent Overstreetcafe5632013-03-23 16:11:31 -07002060 if (split) {
Coly Li6f10f7d2018-08-11 13:19:44 +08002061 unsigned int keys = 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002062
Kent Overstreetee811282013-12-17 23:49:49 -08002063 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
Kent Overstreetc37511b2013-04-26 15:39:55 -07002064
Slava Pestov2452cc82014-07-12 00:22:53 -07002065 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002066 if (IS_ERR(n2))
2067 goto err_free1;
2068
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07002069 if (!b->parent) {
Slava Pestov2452cc82014-07-12 00:22:53 -07002070 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002071 if (IS_ERR(n3))
2072 goto err_free2;
2073 }
2074
Kent Overstreet2a285682014-03-04 16:42:42 -08002075 mutex_lock(&n1->write_lock);
2076 mutex_lock(&n2->write_lock);
2077
Kent Overstreet1b207d82013-09-10 18:52:54 -07002078 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002079
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07002080 /*
2081 * Has to be a linear search because we don't have an auxiliary
Kent Overstreetcafe5632013-03-23 16:11:31 -07002082 * search tree yet
2083 */
2084
Kent Overstreetee811282013-12-17 23:49:49 -08002085 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2086 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
Kent Overstreetfafff812013-12-17 21:56:21 -08002087 keys));
Kent Overstreetcafe5632013-03-23 16:11:31 -07002088
Kent Overstreetfafff812013-12-17 21:56:21 -08002089 bkey_copy_key(&n1->key,
Kent Overstreetee811282013-12-17 23:49:49 -08002090 bset_bkey_idx(btree_bset_first(n1), keys));
2091 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
Kent Overstreetcafe5632013-03-23 16:11:31 -07002092
Kent Overstreetee811282013-12-17 23:49:49 -08002093 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2094 btree_bset_first(n1)->keys = keys;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002095
Kent Overstreetee811282013-12-17 23:49:49 -08002096 memcpy(btree_bset_first(n2)->start,
2097 bset_bkey_last(btree_bset_first(n1)),
2098 btree_bset_first(n2)->keys * sizeof(uint64_t));
Kent Overstreetcafe5632013-03-23 16:11:31 -07002099
2100 bkey_copy_key(&n2->key, &b->key);
2101
Kent Overstreet17e21a92013-07-26 12:32:38 -07002102 bch_keylist_add(&parent_keys, &n2->key);
Kent Overstreetb54d6932013-07-24 18:04:18 -07002103 bch_btree_node_write(n2, &cl);
Kent Overstreet2a285682014-03-04 16:42:42 -08002104 mutex_unlock(&n2->write_lock);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002105 rw_unlock(true, n2);
Kent Overstreetc37511b2013-04-26 15:39:55 -07002106 } else {
Kent Overstreetee811282013-12-17 23:49:49 -08002107 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
Kent Overstreetc37511b2013-04-26 15:39:55 -07002108
Kent Overstreet2a285682014-03-04 16:42:42 -08002109 mutex_lock(&n1->write_lock);
Kent Overstreet1b207d82013-09-10 18:52:54 -07002110 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
Kent Overstreetc37511b2013-04-26 15:39:55 -07002111 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07002112
Kent Overstreet17e21a92013-07-26 12:32:38 -07002113 bch_keylist_add(&parent_keys, &n1->key);
Kent Overstreetb54d6932013-07-24 18:04:18 -07002114 bch_btree_node_write(n1, &cl);
Kent Overstreet2a285682014-03-04 16:42:42 -08002115 mutex_unlock(&n1->write_lock);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002116
2117 if (n3) {
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07002118 /* Depth increases, make a new root */
Kent Overstreet2a285682014-03-04 16:42:42 -08002119 mutex_lock(&n3->write_lock);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002120 bkey_copy_key(&n3->key, &MAX_KEY);
Kent Overstreet17e21a92013-07-26 12:32:38 -07002121 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
Kent Overstreetb54d6932013-07-24 18:04:18 -07002122 bch_btree_node_write(n3, &cl);
Kent Overstreet2a285682014-03-04 16:42:42 -08002123 mutex_unlock(&n3->write_lock);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002124
Kent Overstreetb54d6932013-07-24 18:04:18 -07002125 closure_sync(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002126 bch_btree_set_root(n3);
2127 rw_unlock(true, n3);
Kent Overstreetd6fd3b12013-07-24 17:20:19 -07002128 } else if (!b->parent) {
2129 /* Root filled up but didn't need to be split */
Kent Overstreetb54d6932013-07-24 18:04:18 -07002130 closure_sync(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002131 bch_btree_set_root(n1);
2132 } else {
Kent Overstreet17e21a92013-07-26 12:32:38 -07002133 /* Split a non root node */
Kent Overstreetb54d6932013-07-24 18:04:18 -07002134 closure_sync(&cl);
Kent Overstreet17e21a92013-07-26 12:32:38 -07002135 make_btree_freeing_key(b, parent_keys.top);
2136 bch_keylist_push(&parent_keys);
2137
Kent Overstreet17e21a92013-07-26 12:32:38 -07002138 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2139 BUG_ON(!bch_keylist_empty(&parent_keys));
Kent Overstreetcafe5632013-03-23 16:11:31 -07002140 }
2141
Kent Overstreet05335cf2014-03-17 18:22:34 -07002142 btree_node_free(b);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002143 rw_unlock(true, n1);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002144
Kent Overstreet169ef1c2013-03-28 12:50:55 -06002145 bch_time_stats_update(&b->c->btree_split_time, start_time);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002146
2147 return 0;
2148err_free2:
Kent Overstreet5f5837d2013-12-16 16:38:49 -08002149 bkey_put(b->c, &n2->key);
Kent Overstreete8e1d462013-07-24 17:27:07 -07002150 btree_node_free(n2);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002151 rw_unlock(true, n2);
2152err_free1:
Kent Overstreet5f5837d2013-12-16 16:38:49 -08002153 bkey_put(b->c, &n1->key);
Kent Overstreete8e1d462013-07-24 17:27:07 -07002154 btree_node_free(n1);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002155 rw_unlock(true, n1);
2156err:
Kent Overstreet0a63b662014-03-17 17:15:53 -07002157 WARN(1, "bcache: btree split failed (level %u)", b->level);
Kent Overstreet5f5837d2013-12-16 16:38:49 -08002158
Kent Overstreetcafe5632013-03-23 16:11:31 -07002159 if (n3 == ERR_PTR(-EAGAIN) ||
2160 n2 == ERR_PTR(-EAGAIN) ||
2161 n1 == ERR_PTR(-EAGAIN))
2162 return -EAGAIN;
2163
Kent Overstreetcafe5632013-03-23 16:11:31 -07002164 return -ENOMEM;
2165}
2166
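/*
 * Insert keys into a node under its write lock, marking it dirty (leaf)
 * or writing it out (interior); if the keys don't fit, fall through to
 * btree_split(), retrying with more locks held when necessary.
 */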
Kent Overstreet26c949f2013-09-10 18:41:15 -07002167static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
Kent Overstreetc18536a2013-07-24 17:44:17 -07002168 struct keylist *insert_keys,
Kent Overstreet1b207d82013-09-10 18:52:54 -07002169 atomic_t *journal_ref,
2170 struct bkey *replace_key)
Kent Overstreet26c949f2013-09-10 18:41:15 -07002171{
Kent Overstreet2a285682014-03-04 16:42:42 -08002172 struct closure cl;
2173
Kent Overstreet17e21a92013-07-26 12:32:38 -07002174 BUG_ON(b->level && replace_key);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002175
Kent Overstreet2a285682014-03-04 16:42:42 -08002176 closure_init_stack(&cl);
2177
2178 mutex_lock(&b->write_lock);
2179
2180 if (write_block(b) != btree_bset_last(b) &&
2181 b->keys.last_set_unwritten)
2182 bch_btree_init_next(b); /* just wrote a set */
2183
Kent Overstreet59158fd2013-11-11 19:03:54 -08002184 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
Kent Overstreet2a285682014-03-04 16:42:42 -08002185 mutex_unlock(&b->write_lock);
2186 goto split;
2187 }
Kent Overstreet3b3e9e52013-12-07 03:57:58 -08002188
Kent Overstreet2a285682014-03-04 16:42:42 -08002189 BUG_ON(write_block(b) != btree_bset_last(b));
2190
2191 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2192 if (!b->level)
2193 bch_btree_leaf_dirty(b, journal_ref);
2194 else
2195 bch_btree_node_write(b, &cl);
2196 }
2197
2198 mutex_unlock(&b->write_lock);
2199
2200 /* wait for btree node write if necessary, after unlock */
2201 closure_sync(&cl);
2202
2203 return 0;
2204split:
2205 if (current->bio_list) {
2206 op->lock = b->c->root->level + 1;
2207 return -EAGAIN;
2208 } else if (op->lock <= b->c->root->level) {
2209 op->lock = b->c->root->level + 1;
2210 return -EINTR;
Kent Overstreet17e21a92013-07-26 12:32:38 -07002211 } else {
Kent Overstreet2a285682014-03-04 16:42:42 -08002212 /* Invalidated all iterators */
2213 int ret = btree_split(b, op, insert_keys, replace_key);
Kent Overstreet26c949f2013-09-10 18:41:15 -07002214
Kent Overstreet2a285682014-03-04 16:42:42 -08002215 if (bch_keylist_empty(insert_keys))
2216 return 0;
2217 else if (!ret)
2218 return -EINTR;
2219 return ret;
Kent Overstreet17e21a92013-07-26 12:32:38 -07002220 }
Kent Overstreet26c949f2013-09-10 18:41:15 -07002221}
2222
Kent Overstreete7c590e2013-09-10 18:39:16 -07002223int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2224 struct bkey *check_key)
2225{
2226 int ret = -EINTR;
2227 uint64_t btree_ptr = b->key.ptr[0];
2228 unsigned long seq = b->seq;
2229 struct keylist insert;
2230 bool upgrade = op->lock == -1;
2231
2232 bch_keylist_init(&insert);
2233
2234 if (upgrade) {
2235 rw_unlock(false, b);
2236 rw_lock(true, b, b->level);
2237
2238 if (b->key.ptr[0] != btree_ptr ||
Coly Lic63ca782018-08-11 13:19:50 +08002239 b->seq != seq + 1) {
Bart Van Asschefd019912018-03-18 17:36:26 -07002240 op->lock = b->level;
Kent Overstreete7c590e2013-09-10 18:39:16 -07002241 goto out;
Coly Lic63ca782018-08-11 13:19:50 +08002242 }
Kent Overstreete7c590e2013-09-10 18:39:16 -07002243 }
2244
2245 SET_KEY_PTRS(check_key, 1);
2246 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2247
2248 SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2249
2250 bch_keylist_add(&insert, check_key);
2251
Kent Overstreet1b207d82013-09-10 18:52:54 -07002252 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
Kent Overstreete7c590e2013-09-10 18:39:16 -07002253
2254 BUG_ON(!ret && !bch_keylist_empty(&insert));
2255out:
2256 if (upgrade)
2257 downgrade_write(&b->lock);
2258 return ret;
2259}
2260
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002261struct btree_insert_op {
2262 struct btree_op op;
2263 struct keylist *keys;
2264 atomic_t *journal_ref;
2265 struct bkey *replace_key;
2266};
2267
Wei Yongjun08239ca2013-11-28 10:31:35 +08002268static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002269{
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002270 struct btree_insert_op *op = container_of(b_op,
2271 struct btree_insert_op, op);
Kent Overstreet403b6cd2013-07-24 17:22:44 -07002272
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002273 int ret = bch_btree_insert_node(b, &op->op, op->keys,
2274 op->journal_ref, op->replace_key);
2275 if (ret && !bch_keylist_empty(op->keys))
2276 return ret;
2277 else
2278 return MAP_DONE;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002279}
2280
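/*
 * Top-level insert: walks the leaf nodes covered by the keylist with
 * bch_btree_map_leaf_nodes() and inserts into each in turn.
 *
 * Minimal usage sketch (local variable names are illustrative only):
 *
 *	struct keylist keys;
 *	int ret;
 *
 *	bch_keylist_init(&keys);
 *	bch_keylist_add(&keys, &some_key);
 *	ret = bch_btree_insert(c, &keys, NULL, NULL);
 *
 * On error the remaining keys are dropped; -ESRCH means a replace
 * (compare-and-exchange style) insert hit a collision.
 */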
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002281int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2282 atomic_t *journal_ref, struct bkey *replace_key)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002283{
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002284 struct btree_insert_op op;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002285 int ret = 0;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002286
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002287 BUG_ON(current->bio_list);
Kent Overstreet4f3d4012013-09-10 18:46:36 -07002288 BUG_ON(bch_keylist_empty(keys));
Kent Overstreetcafe5632013-03-23 16:11:31 -07002289
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002290 bch_btree_op_init(&op.op, 0);
2291 op.keys = keys;
2292 op.journal_ref = journal_ref;
2293 op.replace_key = replace_key;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002294
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002295 while (!ret && !bch_keylist_empty(keys)) {
2296 op.op.lock = 0;
2297 ret = bch_btree_map_leaf_nodes(&op.op, c,
2298 &START_KEY(keys->keys),
2299 btree_insert_fn);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002300 }
2301
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002302 if (ret) {
2303 struct bkey *k;
2304
2305 pr_err("error %i", ret);
2306
2307 while ((k = bch_keylist_pop(keys)))
Kent Overstreet3a3b6a42013-07-24 16:46:42 -07002308 bkey_put(c, k);
Kent Overstreetcc7b8812013-07-24 18:07:22 -07002309 } else if (op.op.insert_collision)
2310 ret = -ESRCH;
Kent Overstreet6054c6d2013-07-24 18:06:22 -07002311
Kent Overstreetcafe5632013-03-23 16:11:31 -07002312 return ret;
2313}
2314
2315void bch_btree_set_root(struct btree *b)
2316{
Coly Li6f10f7d2018-08-11 13:19:44 +08002317 unsigned int i;
Kent Overstreete49c7c32013-06-26 17:25:38 -07002318 struct closure cl;
2319
2320 closure_init_stack(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002321
Kent Overstreetc37511b2013-04-26 15:39:55 -07002322 trace_bcache_btree_set_root(b);
2323
Kent Overstreetcafe5632013-03-23 16:11:31 -07002324 BUG_ON(!b->written);
2325
2326 for (i = 0; i < KEY_PTRS(&b->key); i++)
2327 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2328
2329 mutex_lock(&b->c->bucket_lock);
2330 list_del_init(&b->list);
2331 mutex_unlock(&b->c->bucket_lock);
2332
2333 b->c->root = b;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002334
Kent Overstreete49c7c32013-06-26 17:25:38 -07002335 bch_journal_meta(b->c, &cl);
2336 closure_sync(&cl);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002337}
2338
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002339/* Map across nodes or keys */
2340
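/*
 * Generic btree walkers: __bch_btree_map_nodes() calls fn on btree nodes
 * (leaves only, or interior nodes too with MAP_ALL_NODES) and
 * bch_btree_map_keys() calls fn on individual keys, starting from 'from'
 * and continuing while fn returns MAP_CONTINUE.
 */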
2341static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2342 struct bkey *from,
2343 btree_map_nodes_fn *fn, int flags)
2344{
2345 int ret = MAP_CONTINUE;
2346
2347 if (b->level) {
2348 struct bkey *k;
2349 struct btree_iter iter;
2350
Kent Overstreetc052dd92013-11-11 17:35:24 -08002351 bch_btree_iter_init(&b->keys, &iter, from);
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002352
Kent Overstreeta85e9682013-12-20 17:28:16 -08002353 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002354 bch_ptr_bad))) {
2355 ret = btree(map_nodes_recurse, k, b,
2356 op, from, fn, flags);
2357 from = NULL;
2358
2359 if (ret != MAP_CONTINUE)
2360 return ret;
2361 }
2362 }
2363
2364 if (!b->level || flags == MAP_ALL_NODES)
2365 ret = fn(op, b);
2366
2367 return ret;
2368}
2369
2370int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2371 struct bkey *from, btree_map_nodes_fn *fn, int flags)
2372{
Kent Overstreetb54d6932013-07-24 18:04:18 -07002373 return btree_root(map_nodes_recurse, c, op, from, fn, flags);
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002374}
2375
2376static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2377 struct bkey *from, btree_map_keys_fn *fn,
2378 int flags)
2379{
2380 int ret = MAP_CONTINUE;
2381 struct bkey *k;
2382 struct btree_iter iter;
2383
Kent Overstreetc052dd92013-11-11 17:35:24 -08002384 bch_btree_iter_init(&b->keys, &iter, from);
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002385
Kent Overstreeta85e9682013-12-20 17:28:16 -08002386 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002387 ret = !b->level
2388 ? fn(op, b, k)
2389 : btree(map_keys_recurse, k, b, op, from, fn, flags);
2390 from = NULL;
2391
2392 if (ret != MAP_CONTINUE)
2393 return ret;
2394 }
2395
2396 if (!b->level && (flags & MAP_END_KEY))
2397 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2398 KEY_OFFSET(&b->key), 0));
2399
2400 return ret;
2401}
2402
2403int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2404 struct bkey *from, btree_map_keys_fn *fn, int flags)
2405{
Kent Overstreetb54d6932013-07-24 18:04:18 -07002406 return btree_root(map_keys_recurse, c, op, from, fn, flags);
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002407}
2408
Kent Overstreetcafe5632013-03-23 16:11:31 -07002409/* Keybuf code */
2410
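/*
 * A keybuf is a red-black tree of keys found by scanning the btree with
 * a predicate (used by consumers such as writeback); a refill scans
 * forward from last_scanned until the freelist of keybuf_keys runs out.
 */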
2411static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2412{
2413 /* Overlapping keys compare equal */
2414 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2415 return -1;
2416 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2417 return 1;
2418 return 0;
2419}
2420
2421static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2422 struct keybuf_key *r)
2423{
2424 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2425}
2426
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002427struct refill {
2428 struct btree_op op;
Coly Li6f10f7d2018-08-11 13:19:44 +08002429 unsigned int nr_found;
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002430 struct keybuf *buf;
2431 struct bkey *end;
2432 keybuf_pred_fn *pred;
2433};
2434
2435static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2436 struct bkey *k)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002437{
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002438 struct refill *refill = container_of(op, struct refill, op);
2439 struct keybuf *buf = refill->buf;
2440 int ret = MAP_CONTINUE;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002441
Tang Junhui2d6cb6e2018-10-08 20:41:14 +08002442 if (bkey_cmp(k, refill->end) > 0) {
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002443 ret = MAP_DONE;
2444 goto out;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002445 }
2446
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002447 if (!KEY_SIZE(k)) /* end key */
2448 goto out;
2449
2450 if (refill->pred(buf, k)) {
2451 struct keybuf_key *w;
2452
2453 spin_lock(&buf->lock);
2454
2455 w = array_alloc(&buf->freelist);
2456 if (!w) {
2457 spin_unlock(&buf->lock);
2458 return MAP_DONE;
2459 }
2460
2461 w->private = NULL;
2462 bkey_copy(&w->key, k);
2463
2464 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2465 array_free(&buf->freelist, w);
Kent Overstreet48a915a2013-10-31 15:43:22 -07002466 else
2467 refill->nr_found++;
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002468
2469 if (array_freelist_empty(&buf->freelist))
2470 ret = MAP_DONE;
2471
2472 spin_unlock(&buf->lock);
2473 }
2474out:
2475 buf->last_scanned = *k;
2476 return ret;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002477}
2478
2479void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
Kent Overstreet72c27062013-06-05 06:24:39 -07002480 struct bkey *end, keybuf_pred_fn *pred)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002481{
2482 struct bkey start = buf->last_scanned;
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002483 struct refill refill;
Kent Overstreetcafe5632013-03-23 16:11:31 -07002484
2485 cond_resched();
2486
Kent Overstreetb54d6932013-07-24 18:04:18 -07002487 bch_btree_op_init(&refill.op, -1);
Kent Overstreet48a915a2013-10-31 15:43:22 -07002488 refill.nr_found = 0;
2489 refill.buf = buf;
2490 refill.end = end;
2491 refill.pred = pred;
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002492
2493 bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2494 refill_keybuf_fn, MAP_END_KEY);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002495
Kent Overstreet48a915a2013-10-31 15:43:22 -07002496 trace_bcache_keyscan(refill.nr_found,
2497 KEY_INODE(&start), KEY_OFFSET(&start),
2498 KEY_INODE(&buf->last_scanned),
2499 KEY_OFFSET(&buf->last_scanned));
Kent Overstreetcafe5632013-03-23 16:11:31 -07002500
2501 spin_lock(&buf->lock);
2502
2503 if (!RB_EMPTY_ROOT(&buf->keys)) {
2504 struct keybuf_key *w;
Coly Li1fae7cf2018-08-11 13:19:45 +08002505
Kent Overstreetcafe5632013-03-23 16:11:31 -07002506 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2507 buf->start = START_KEY(&w->key);
2508
2509 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2510 buf->end = w->key;
2511 } else {
2512 buf->start = MAX_KEY;
2513 buf->end = MAX_KEY;
2514 }
2515
2516 spin_unlock(&buf->lock);
2517}
2518
2519static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2520{
2521 rb_erase(&w->node, &buf->keys);
2522 array_free(&buf->freelist, w);
2523}
2524
2525void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2526{
2527 spin_lock(&buf->lock);
2528 __bch_keybuf_del(buf, w);
2529 spin_unlock(&buf->lock);
2530}
2531
2532bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2533 struct bkey *end)
2534{
2535 bool ret = false;
2536 struct keybuf_key *p, *w, s;
Coly Li1fae7cf2018-08-11 13:19:45 +08002537
Kent Overstreetcafe5632013-03-23 16:11:31 -07002538 s.key = *start;
2539
2540 if (bkey_cmp(end, &buf->start) <= 0 ||
2541 bkey_cmp(start, &buf->end) >= 0)
2542 return false;
2543
2544 spin_lock(&buf->lock);
2545 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2546
2547 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2548 p = w;
2549 w = RB_NEXT(w, node);
2550
2551 if (p->private)
2552 ret = true;
2553 else
2554 __bch_keybuf_del(buf, p);
2555 }
2556
2557 spin_unlock(&buf->lock);
2558 return ret;
2559}
2560
2561struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2562{
2563 struct keybuf_key *w;
Coly Li1fae7cf2018-08-11 13:19:45 +08002564
Kent Overstreetcafe5632013-03-23 16:11:31 -07002565 spin_lock(&buf->lock);
2566
2567 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2568
2569 while (w && w->private)
2570 w = RB_NEXT(w, node);
2571
2572 if (w)
2573 w->private = ERR_PTR(-EINTR);
2574
2575 spin_unlock(&buf->lock);
2576 return w;
2577}
2578
2579struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
Kent Overstreet48dad8b2013-09-10 18:48:51 -07002580 struct keybuf *buf,
2581 struct bkey *end,
2582 keybuf_pred_fn *pred)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002583{
2584 struct keybuf_key *ret;
2585
2586 while (1) {
2587 ret = bch_keybuf_next(buf);
2588 if (ret)
2589 break;
2590
2591 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2592 pr_debug("scan finished");
2593 break;
2594 }
2595
Kent Overstreet72c27062013-06-05 06:24:39 -07002596 bch_refill_keybuf(c, buf, end, pred);
Kent Overstreetcafe5632013-03-23 16:11:31 -07002597 }
2598
2599 return ret;
2600}
2601
Kent Overstreet72c27062013-06-05 06:24:39 -07002602void bch_keybuf_init(struct keybuf *buf)
Kent Overstreetcafe5632013-03-23 16:11:31 -07002603{
Kent Overstreetcafe5632013-03-23 16:11:31 -07002604 buf->last_scanned = MAX_KEY;
2605 buf->keys = RB_ROOT;
2606
2607 spin_lock_init(&buf->lock);
2608 array_allocator_init(&buf->freelist);
2609}