/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

static const char bcache_magic[] = {
	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

/* Default is -1; we skip past it for struct cached_dev's cache mode */
const char * const bch_cache_modes[] = {
	"default",
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major;
static DEFINE_IDA(bcache_minor);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;

#define BTREE_MAX_PAGES	(256 * 1024 / PAGE_SIZE)
#define BCACHE_MINORS	16 /* partition support */

/* Superblock */

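/*
 * read_super() - read and validate an on-disk bcache superblock.
 *
 * Reads the superblock from SB_SECTOR on @bdev, byte-swaps it into @sb and
 * sanity checks it.  Returns NULL on success or a human readable error
 * string on failure; on success *@res holds an extra reference on the page
 * containing the raw on-disk superblock, which callers reuse when writing
 * the superblock back out.
 */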
static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
			      struct page **res)
{
	const char *err;
	struct cache_sb *s;
	struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
	unsigned i;

	if (!bh)
		return "IO error";

	s = (struct cache_sb *) bh->b_data;

	sb->offset = le64_to_cpu(s->offset);
	sb->version = le64_to_cpu(s->version);

	memcpy(sb->magic, s->magic, 16);
	memcpy(sb->uuid, s->uuid, 16);
	memcpy(sb->set_uuid, s->set_uuid, 16);
	memcpy(sb->label, s->label, SB_LABEL_SIZE);

	sb->flags = le64_to_cpu(s->flags);
	sb->seq = le64_to_cpu(s->seq);
	sb->last_mount = le32_to_cpu(s->last_mount);
	sb->first_bucket = le16_to_cpu(s->first_bucket);
	sb->keys = le16_to_cpu(s->keys);

	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
		sb->d[i] = le64_to_cpu(s->d[i]);

	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
		 sb->version, sb->flags, sb->seq, sb->keys);

	err = "Not a bcache superblock";
	if (sb->offset != SB_SECTOR)
		goto err;

	if (memcmp(sb->magic, bcache_magic, 16))
		goto err;

	err = "Too many journal buckets";
	if (sb->keys > SB_JOURNAL_BUCKETS)
		goto err;

	err = "Bad checksum";
	if (s->csum != csum_set(s))
		goto err;

	err = "Bad UUID";
	if (bch_is_zero(sb->uuid, 16))
		goto err;

	sb->block_size = le16_to_cpu(s->block_size);

	err = "Superblock block size smaller than device block size";
	if (sb->block_size << 9 < bdev_logical_block_size(bdev))
		goto err;

	switch (sb->version) {
	case BCACHE_SB_VERSION_BDEV:
		sb->data_offset = BDEV_DATA_START_DEFAULT;
		break;
	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
		sb->data_offset = le64_to_cpu(s->data_offset);

		err = "Bad data offset";
		if (sb->data_offset < BDEV_DATA_START_DEFAULT)
			goto err;

		break;
	case BCACHE_SB_VERSION_CDEV:
	case BCACHE_SB_VERSION_CDEV_WITH_UUID:
		sb->nbuckets = le64_to_cpu(s->nbuckets);
		sb->bucket_size = le16_to_cpu(s->bucket_size);

		sb->nr_in_set = le16_to_cpu(s->nr_in_set);
		sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);

		err = "Too many buckets";
		if (sb->nbuckets > LONG_MAX)
			goto err;

		err = "Not enough buckets";
		if (sb->nbuckets < 1 << 7)
			goto err;

		err = "Bad block/bucket size";
		if (!is_power_of_2(sb->block_size) ||
		    sb->block_size > PAGE_SECTORS ||
		    !is_power_of_2(sb->bucket_size) ||
		    sb->bucket_size < PAGE_SECTORS)
			goto err;

		err = "Invalid superblock: device too small";
		if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
			goto err;

		err = "Bad UUID";
		if (bch_is_zero(sb->set_uuid, 16))
			goto err;

		err = "Bad cache device number in set";
		if (!sb->nr_in_set ||
		    sb->nr_in_set <= sb->nr_this_dev ||
		    sb->nr_in_set > MAX_CACHES_PER_SET)
			goto err;

		err = "Journal buckets not sequential";
		for (i = 0; i < sb->keys; i++)
			if (sb->d[i] != sb->first_bucket + i)
				goto err;

		err = "Too many journal buckets";
		if (sb->first_bucket + sb->keys > sb->nbuckets)
			goto err;

		err = "Invalid superblock: first bucket comes before end of super";
		if (sb->first_bucket * sb->bucket_size < 16)
			goto err;

		break;
	default:
		err = "Unsupported superblock version";
		goto err;
	}

	sb->last_mount = get_seconds();
	err = NULL;

	get_page(bh->b_page);
	*res = bh->b_page;
err:
	put_bh(bh);
	return err;
}

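/*
 * Superblock writes: __write_super() copies the in-memory struct cache_sb
 * back into the on-disk (little endian) layout held in the superblock page
 * and submits the bio.  Writers serialize on the owner's sb_write_mutex
 * semaphore and track the in-flight bio with the sb_write closure, which is
 * released from the endio handler.
 */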
static void write_bdev_super_endio(struct bio *bio)
{
	struct cached_dev *dc = bio->bi_private;
	/* XXX: error checking */

	closure_put(&dc->sb_write);
}

static void __write_super(struct cache_sb *sb, struct bio *bio)
{
	struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
	unsigned i;

	bio->bi_iter.bi_sector = SB_SECTOR;
	bio->bi_iter.bi_size = SB_SIZE;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
	bch_bio_map(bio, NULL);

	out->offset = cpu_to_le64(sb->offset);
	out->version = cpu_to_le64(sb->version);

	memcpy(out->uuid, sb->uuid, 16);
	memcpy(out->set_uuid, sb->set_uuid, 16);
	memcpy(out->label, sb->label, SB_LABEL_SIZE);

	out->flags = cpu_to_le64(sb->flags);
	out->seq = cpu_to_le64(sb->seq);

	out->last_mount = cpu_to_le32(sb->last_mount);
	out->first_bucket = cpu_to_le16(sb->first_bucket);
	out->keys = cpu_to_le16(sb->keys);

	for (i = 0; i < sb->keys; i++)
		out->d[i] = cpu_to_le64(sb->d[i]);

	out->csum = csum_set(out);

	pr_debug("ver %llu, flags %llu, seq %llu",
		 sb->version, sb->flags, sb->seq);

	submit_bio(bio);
}

static void bch_write_bdev_super_unlock(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);

	up(&dc->sb_write_mutex);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
	struct closure *cl = &dc->sb_write;
	struct bio *bio = &dc->sb_bio;

	down(&dc->sb_write_mutex);
	closure_init(cl, parent);

	bio_reset(bio);
	bio_set_dev(bio, dc->bdev);
	bio->bi_end_io = write_bdev_super_endio;
	bio->bi_private = dc;

	closure_get(cl);
	__write_super(&dc->sb, bio);

	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}

static void write_super_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	bch_count_io_errors(ca, bio->bi_status, "writing superblock");
	closure_put(&ca->set->sb_write);
}

static void bcache_write_super_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, sb_write);

	up(&c->sb_write_mutex);
}

void bcache_write_super(struct cache_set *c)
{
	struct closure *cl = &c->sb_write;
	struct cache *ca;
	unsigned i;

	down(&c->sb_write_mutex);
	closure_init(cl, &c->cl);

	c->sb.seq++;

	for_each_cache(ca, c, i) {
		struct bio *bio = &ca->sb_bio;

		ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
		ca->sb.seq = c->sb.seq;
		ca->sb.last_mount = c->sb.last_mount;

		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

		bio_reset(bio);
		bio_set_dev(bio, ca->bdev);
		bio->bi_end_io = write_super_endio;
		bio->bi_private = ca;

		closure_get(cl);
		__write_super(&ca->sb, bio);
	}

	closure_return_with_destructor(cl, bcache_write_super_unlock);
}

/* UUID io */

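/*
 * The uuid_entry array (c->uuids) maps attached backing devices and flash
 * volumes to their slot in the cache set.  The whole array lives in a single
 * bucket; the key pointing at it is kept in the journal header and rewritten
 * by __uuid_write() whenever the array changes.  Access is serialized by
 * uuid_write_mutex and tracked with the uuid_write closure.
 */
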
static void uuid_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	cache_set_err_on(bio->bi_status, c, "accessing uuids");
	bch_bbio_free(bio, c);
	closure_put(cl);
}

static void uuid_io_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	up(&c->uuid_write_mutex);
}

static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
		    struct bkey *k, struct closure *parent)
{
	struct closure *cl = &c->uuid_write;
	struct uuid_entry *u;
	unsigned i;
	char buf[80];

	BUG_ON(!parent);
	down(&c->uuid_write_mutex);
	closure_init(cl, parent);

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bio *bio = bch_bbio_alloc(c);

		bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;

		bio->bi_end_io = uuid_endio;
		bio->bi_private = cl;
		bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
		bch_bio_map(bio, c->uuids);

		bch_submit_bbio(bio, c, k, i);

		if (op != REQ_OP_WRITE)
			break;
	}

	bch_extent_to_text(buf, sizeof(buf), k);
	pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);

	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
		if (!bch_is_zero(u->uuid, 16))
			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
				 u - c->uuids, u->uuid, u->label,
				 u->first_reg, u->last_reg, u->invalidated);

	closure_return_with_destructor(cl, uuid_io_unlock);
}

static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
	struct bkey *k = &j->uuid_bucket;

	if (__bch_btree_ptr_invalid(c, k))
		return "bad uuid pointer";

	bkey_copy(&c->uuid_bucket, k);
	uuid_io(c, REQ_OP_READ, 0, k, cl);

	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
		struct uuid_entry_v0 *u0 = (void *) c->uuids;
		struct uuid_entry *u1 = (void *) c->uuids;
		int i;

		closure_sync(cl);

		/*
		 * Since the new uuid entry is bigger than the old, we have to
		 * convert starting at the highest memory address and work down
		 * in order to do it in place
		 */

		for (i = c->nr_uuids - 1;
		     i >= 0;
		     --i) {
			memcpy(u1[i].uuid, u0[i].uuid, 16);
			memcpy(u1[i].label, u0[i].label, 32);

			u1[i].first_reg = u0[i].first_reg;
			u1[i].last_reg = u0[i].last_reg;
			u1[i].invalidated = u0[i].invalidated;

			u1[i].flags = 0;
			u1[i].sectors = 0;
		}
	}

	return NULL;
}

static int __uuid_write(struct cache_set *c)
{
	BKEY_PADDED(key) k;
	struct closure cl;
	closure_init_stack(&cl);

	lockdep_assert_held(&bch_register_lock);

	if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
		return 1;

	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
	uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
	closure_sync(&cl);

	bkey_copy(&c->uuid_bucket, &k.key);
	bkey_put(c, &k.key);
	return 0;
}

int bch_uuid_write(struct cache_set *c)
{
	int ret = __uuid_write(c);

	if (!ret)
		bch_journal_meta(c, NULL);

	return ret;
}

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids; u++)
		if (!memcmp(u->uuid, uuid, 16))
			return u;

	return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
	static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
	return uuid_find(c, zero_uuid);
}

/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *   8 bit gen
 *   16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * lru (and in the future other) cache replacement policies; for most purposes
 * it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other,
 * and it's actually the gens that must be written out at specific times - it's
 * no big deal if the priorities don't get written; if we lose them we just
 * reuse buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, and in as many buckets as are
 * required to fit them all. The buckets we use to store them form a list; the
 * journal header points to the first bucket, the first bucket points to the
 * second bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */

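/*
 * A rough sketch of the on-disk chain written by bch_prio_write() below (the
 * authoritative struct definitions live in bcache.h): each prio bucket starts
 * with a header - csum (a crc64 of everything after it), magic
 * (pset_magic(&ca->sb)), seq, and next_bucket (the bucket number of the next
 * prio bucket in the list) - followed by a packed array of
 * { 16 bit prio, 8 bit gen } entries, one per data bucket covered by that
 * prio bucket.  prio_read() walks the chain from the bucket recorded in the
 * journal header and restores b->prio and b->gen for every bucket.
 */
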
static void prio_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
	bch_bbio_free(bio, ca->set);
	closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, int op,
		    unsigned long op_flags)
{
	struct closure *cl = &ca->prio;
	struct bio *bio = bch_bbio_alloc(ca->set);

	closure_init_stack(cl);

	bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
	bio_set_dev(bio, ca->bdev);
	bio->bi_iter.bi_size = bucket_bytes(ca);

	bio->bi_end_io = prio_endio;
	bio->bi_private = ca;
	bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
	bch_bio_map(bio, ca->disk_buckets);

	closure_bio_submit(bio, &ca->prio);
	closure_sync(cl);
}

void bch_prio_write(struct cache *ca)
{
	int i;
	struct bucket *b;
	struct closure cl;

	closure_init_stack(&cl);

	lockdep_assert_held(&ca->set->bucket_lock);

	ca->disk_buckets->seq++;

	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
			&ca->meta_sectors_written);

	//pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
	//	 fifo_used(&ca->free_inc), fifo_used(&ca->unused));

	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
		long bucket;
		struct prio_set *p = ca->disk_buckets;
		struct bucket_disk *d = p->data;
		struct bucket_disk *end = d + prios_per_bucket(ca);

		for (b = ca->buckets + i * prios_per_bucket(ca);
		     b < ca->buckets + ca->sb.nbuckets && d < end;
		     b++, d++) {
			d->prio = cpu_to_le16(b->prio);
			d->gen = b->gen;
		}

		p->next_bucket = ca->prio_buckets[i + 1];
		p->magic = pset_magic(&ca->sb);
		p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);

		bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
		BUG_ON(bucket == -1);

		mutex_unlock(&ca->set->bucket_lock);
		prio_io(ca, bucket, REQ_OP_WRITE, 0);
		mutex_lock(&ca->set->bucket_lock);

		ca->prio_buckets[i] = bucket;
		atomic_dec_bug(&ca->buckets[bucket].pin);
	}

	mutex_unlock(&ca->set->bucket_lock);

	bch_journal_meta(ca->set, &cl);
	closure_sync(&cl);

	mutex_lock(&ca->set->bucket_lock);

	/*
	 * Don't want the old priorities to get garbage collected until after we
	 * finish writing the new ones, and they're journalled
	 */
	for (i = 0; i < prio_buckets(ca); i++) {
		if (ca->prio_last_buckets[i])
			__bch_bucket_free(ca,
				&ca->buckets[ca->prio_last_buckets[i]]);

		ca->prio_last_buckets[i] = ca->prio_buckets[i];
	}
}

static void prio_read(struct cache *ca, uint64_t bucket)
{
	struct prio_set *p = ca->disk_buckets;
	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
	struct bucket *b;
	unsigned bucket_nr = 0;

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets;
	     b++, d++) {
		if (d == end) {
			ca->prio_buckets[bucket_nr] = bucket;
			ca->prio_last_buckets[bucket_nr] = bucket;
			bucket_nr++;

			prio_io(ca, bucket, REQ_OP_READ, 0);

			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
				pr_warn("bad csum reading priorities");

			if (p->magic != pset_magic(&ca->sb))
				pr_warn("bad magic reading priorities");

			bucket = p->next_bucket;
			d = p->data;
		}

		b->prio = le16_to_cpu(d->prio);
		b->gen = b->last_gc = d->gen;
	}
}

/* Bcache device */

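/*
 * struct bcache_device is the generic half shared by cached (backing) devices
 * and flash-only volumes: it owns the gendisk and request queue exposed to
 * the rest of the kernel, the link to its cache set, and the dirty stripe
 * bookkeeping allocated in bcache_device_init().
 */
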
static int open_dev(struct block_device *b, fmode_t mode)
{
	struct bcache_device *d = b->bd_disk->private_data;
	if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
		return -ENXIO;

	closure_get(&d->cl);
	return 0;
}

static void release_dev(struct gendisk *b, fmode_t mode)
{
	struct bcache_device *d = b->private_data;
	closure_put(&d->cl);
}

static int ioctl_dev(struct block_device *b, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct bcache_device *d = b->bd_disk->private_data;
	return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_ops = {
	.open = open_dev,
	.release = release_dev,
	.ioctl = ioctl_dev,
	.owner = THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{
	if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
		closure_queue(&d->cl);
}

static void bcache_device_unlink(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
		unsigned i;
		struct cache *ca;

		sysfs_remove_link(&d->c->kobj, d->name);
		sysfs_remove_link(&d->kobj, "cache");

		for_each_cache(ca, d->c, i)
			bd_unlink_disk_holder(ca->bdev, d->disk);
	}
}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
			       const char *name)
{
	unsigned i;
	struct cache *ca;

	for_each_cache(ca, d->c, i)
		bd_link_disk_holder(ca->bdev, d->disk);

	snprintf(d->name, BCACHEDEVNAME_SIZE,
		 "%s%u", name, d->id);

	WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
	     sysfs_create_link(&c->kobj, &d->kobj, d->name),
	     "Couldn't create device <-> cache set symlinks");

	clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
}

static void bcache_device_detach(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
		struct uuid_entry *u = d->c->uuids + d->id;

		SET_UUID_FLASH_ONLY(u, 0);
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		bch_uuid_write(d->c);
	}

	bcache_device_unlink(d);

	d->c->devices[d->id] = NULL;
	closure_put(&d->c->caching);
	d->c = NULL;
}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
				 unsigned id)
{
	d->id = id;
	d->c = c;
	c->devices[id] = d;

	closure_get(&c->caching);
}

static void bcache_device_free(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	pr_info("%s stopped", d->disk->disk_name);

	if (d->c)
		bcache_device_detach(d);
	if (d->disk && d->disk->flags & GENHD_FL_UP)
		del_gendisk(d->disk);
	if (d->disk && d->disk->queue)
		blk_cleanup_queue(d->disk->queue);
	if (d->disk) {
		ida_simple_remove(&bcache_minor, d->disk->first_minor);
		put_disk(d->disk);
	}

	if (d->bio_split)
		bioset_free(d->bio_split);
	kvfree(d->full_dirty_stripes);
	kvfree(d->stripe_sectors_dirty);

	closure_debug_destroy(&d->cl);
}

static int bcache_device_init(struct bcache_device *d, unsigned block_size,
			      sector_t sectors)
{
	struct request_queue *q;
	size_t n;
	int minor;

	if (!d->stripe_size)
		d->stripe_size = 1 << 31;

	d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);

	if (!d->nr_stripes ||
	    d->nr_stripes > INT_MAX ||
	    d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
		pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
		       (unsigned)d->nr_stripes);
		return -ENOMEM;
	}

	n = d->nr_stripes * sizeof(atomic_t);
	d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
	if (!d->stripe_sectors_dirty)
		return -ENOMEM;

	n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
	d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
	if (!d->full_dirty_stripes)
		return -ENOMEM;

	minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL);
	if (minor < 0)
		return minor;

	minor *= BCACHE_MINORS;

	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio),
					   BIOSET_NEED_BVECS |
					   BIOSET_NEED_RESCUER)) ||
	    !(d->disk = alloc_disk(BCACHE_MINORS))) {
		ida_simple_remove(&bcache_minor, minor);
		return -ENOMEM;
	}

	set_capacity(d->disk, sectors);
	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor);

	d->disk->major = bcache_major;
	d->disk->first_minor = minor;
	d->disk->fops = &bcache_ops;
	d->disk->private_data = d;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	blk_queue_make_request(q, NULL);
	d->disk->queue = q;
	q->queuedata = d;
	q->backing_dev_info->congested_data = d;
	q->limits.max_hw_sectors = UINT_MAX;
	q->limits.max_sectors = UINT_MAX;
	q->limits.max_segment_size = UINT_MAX;
	q->limits.max_segments = BIO_MAX_PAGES;
	blk_queue_max_discard_sectors(q, UINT_MAX);
	q->limits.discard_granularity = 512;
	q->limits.io_min = block_size;
	q->limits.logical_block_size = block_size;
	q->limits.physical_block_size = block_size;
	set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
	clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags);
	set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);

	blk_queue_write_cache(q, true, true);

	return 0;
}

/* Cached device */

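/*
 * Registration path for backing devices: register_bdev() takes the superblock
 * read earlier, cached_dev_init() sets up the struct cached_dev and its
 * embedded bcache_device, bch_cached_dev_attach() binds it to a cache set,
 * and bch_cached_dev_run() finally exposes the /dev/bcacheN disk.  Teardown
 * runs through the cached_dev_flush() / cached_dev_free() closures.
 */
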
static void calc_cached_dev_sectors(struct cache_set *c)
{
	uint64_t sectors = 0;
	struct cached_dev *dc;

	list_for_each_entry(dc, &c->cached_devs, list)
		sectors += bdev_sectors(dc->bdev);

	c->cached_dev_sectors = sectors;
}

void bch_cached_dev_run(struct cached_dev *dc)
{
	struct bcache_device *d = &dc->disk;
	char buf[SB_LABEL_SIZE + 1];
	char *env[] = {
		"DRIVER=bcache",
		kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
		NULL,
		NULL,
	};

	memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
	buf[SB_LABEL_SIZE] = '\0';
	env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);

	if (atomic_xchg(&dc->running, 1)) {
		kfree(env[1]);
		kfree(env[2]);
		return;
	}

	if (!d->c &&
	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
		struct closure cl;
		closure_init_stack(&cl);

		SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	}

	add_disk(d->disk);
	bd_link_disk_holder(dc->bdev, dc->disk.disk);
	/* won't show up in the uevent file, use udevadm monitor -e instead
	 * only class / kset properties are persistent */
	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
	kfree(env[1]);
	kfree(env[2]);

	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
	    sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
		pr_debug("error creating sysfs link");
}

static void cached_dev_detach_finish(struct work_struct *w)
{
	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
	char buf[BDEVNAME_SIZE];
	struct closure cl;
	closure_init_stack(&cl);

	BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
	BUG_ON(atomic_read(&dc->count));

	mutex_lock(&bch_register_lock);

	memset(&dc->sb.set_uuid, 0, 16);
	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

	bch_write_bdev_super(dc, &cl);
	closure_sync(&cl);

	bcache_device_detach(&dc->disk);
	list_move(&dc->list, &uncached_devices);

	clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
	clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);

	mutex_unlock(&bch_register_lock);

	pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));

	/* Drop ref we took in cached_dev_detach() */
	closure_put(&dc->disk.cl);
}

void bch_cached_dev_detach(struct cached_dev *dc)
{
	lockdep_assert_held(&bch_register_lock);

	if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
		return;

	if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
		return;

	/*
	 * Block the device from being closed and freed until we're finished
	 * detaching
	 */
	closure_get(&dc->disk.cl);

	bch_writeback_queue(dc);
	cached_dev_put(dc);
}

int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
{
	uint32_t rtime = cpu_to_le32(get_seconds());
	struct uuid_entry *u;
	char buf[BDEVNAME_SIZE];

	bdevname(dc->bdev, buf);

	if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
		return -ENOENT;

	if (dc->disk.c) {
		pr_err("Can't attach %s: already attached", buf);
		return -EINVAL;
	}

	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
		pr_err("Can't attach %s: shutting down", buf);
		return -EINVAL;
	}

	if (dc->sb.block_size < c->sb.block_size) {
		/* Will die */
		pr_err("Couldn't attach %s: block size less than set's block size",
		       buf);
		return -EINVAL;
	}

	u = uuid_find(c, dc->sb.uuid);

	if (u &&
	    (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
	     BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		u = NULL;
	}

	if (!u) {
		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
			pr_err("Couldn't find uuid for %s in set", buf);
			return -ENOENT;
		}

		u = uuid_find_empty(c);
		if (!u) {
			pr_err("Not caching %s, no room for UUID", buf);
			return -EINVAL;
		}
	}

	/* Deadlocks since we're called via sysfs...
	sysfs_remove_file(&dc->kobj, &sysfs_attach);
	*/

	if (bch_is_zero(u->uuid, 16)) {
		struct closure cl;
		closure_init_stack(&cl);

		memcpy(u->uuid, dc->sb.uuid, 16);
		memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
		u->first_reg = u->last_reg = rtime;
		bch_uuid_write(c);

		memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	} else {
		u->last_reg = rtime;
		bch_uuid_write(c);
	}

	bcache_device_attach(&dc->disk, c, u - c->uuids);
	list_move(&dc->list, &c->cached_devs);
	calc_cached_dev_sectors(c);

	smp_wmb();
	/*
	 * dc->c must be set before dc->count != 0 - paired with the mb in
	 * cached_dev_get()
	 */
	atomic_set(&dc->count, 1);

	/* Block writeback thread, but spawn it */
	down_write(&dc->writeback_lock);
	if (bch_cached_dev_writeback_start(dc)) {
		up_write(&dc->writeback_lock);
		return -ENOMEM;
	}

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
		bch_sectors_dirty_init(&dc->disk);
		atomic_set(&dc->has_dirty, 1);
		atomic_inc(&dc->count);
		bch_writeback_queue(dc);
	}

	bch_cached_dev_run(dc);
	bcache_device_link(&dc->disk, c, "bdev");

	/* Allow the writeback thread to proceed */
	up_write(&dc->writeback_lock);

	pr_info("Caching %s as %s on set %pU",
		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
		dc->disk.c->sb.set_uuid);
	return 0;
}

void bch_cached_dev_release(struct kobject *kobj)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	kfree(dc);
	module_put(THIS_MODULE);
}

static void cached_dev_free(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

	cancel_delayed_work_sync(&dc->writeback_rate_update);
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		kthread_stop(dc->writeback_thread);
	if (dc->writeback_write_wq)
		destroy_workqueue(dc->writeback_write_wq);

	mutex_lock(&bch_register_lock);

	if (atomic_read(&dc->running))
		bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
	bcache_device_free(&dc->disk);
	list_del(&dc->list);

	mutex_unlock(&bch_register_lock);

	if (!IS_ERR_OR_NULL(dc->bdev))
		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	wake_up(&unregister_wait);

	kobject_put(&dc->disk.kobj);
}

static void cached_dev_flush(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
	struct bcache_device *d = &dc->disk;

	mutex_lock(&bch_register_lock);
	bcache_device_unlink(d);
	mutex_unlock(&bch_register_lock);

	bch_cache_accounting_destroy(&dc->accounting);
	kobject_del(&d->kobj);

	continue_at(cl, cached_dev_free, system_wq);
}

static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
{
	int ret;
	struct io *io;
	struct request_queue *q = bdev_get_queue(dc->bdev);

	__module_get(THIS_MODULE);
	INIT_LIST_HEAD(&dc->list);
	closure_init(&dc->disk.cl, NULL);
	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
	INIT_WORK(&dc->detach, cached_dev_detach_finish);
	sema_init(&dc->sb_write_mutex, 1);
	INIT_LIST_HEAD(&dc->io_lru);
	spin_lock_init(&dc->io_lock);
	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

	dc->sequential_cutoff = 4 << 20;

	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
		list_add(&io->lru, &dc->io_lru);
		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
	}

	dc->disk.stripe_size = q->limits.io_opt >> 9;

	if (dc->disk.stripe_size)
		dc->partial_stripes_expensive =
			q->limits.raid_partial_stripes_expensive;

	ret = bcache_device_init(&dc->disk, block_size,
			 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
	if (ret)
		return ret;

	set_capacity(dc->disk.disk,
		     dc->bdev->bd_part->nr_sects - dc->sb.data_offset);

	dc->disk.disk->queue->backing_dev_info->ra_pages =
		max(dc->disk.disk->queue->backing_dev_info->ra_pages,
		    q->backing_dev_info->ra_pages);

	bch_cached_dev_request_init(dc);
	bch_cached_dev_writeback_init(dc);
	return 0;
}

/* Cached device - bcache superblock */

static void register_bdev(struct cache_sb *sb, struct page *sb_page,
			  struct block_device *bdev,
			  struct cached_dev *dc)
{
	char name[BDEVNAME_SIZE];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
	dc->bdev = bdev;
	dc->bdev->bd_holder = dc;

	bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
	dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
	get_page(sb_page);

	if (cached_dev_init(dc, sb->block_size << 9))
		goto err;

	err = "error creating kobject";
	if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
			"bcache"))
		goto err;
	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
		goto err;

	pr_info("registered backing device %s", bdevname(bdev, name));

	list_add(&dc->list, &uncached_devices);
	list_for_each_entry(c, &bch_cache_sets, list)
		bch_cached_dev_attach(dc, c);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
		bch_cached_dev_run(dc);

	return;
err:
	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
	bcache_device_stop(&dc->disk);
}

/* Flash only volumes */

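/*
 * Flash-only volumes are bcache devices with no backing device: their data
 * lives entirely in the cache set.  Each one is represented by a uuid_entry
 * with UUID_FLASH_ONLY set; bch_flash_dev_create() allocates the entry and
 * flash_dev_run() exposes the block device.
 */
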
void bch_flash_dev_release(struct kobject *kobj)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	kfree(d);
}

static void flash_dev_free(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
	mutex_lock(&bch_register_lock);
	bcache_device_free(d);
	mutex_unlock(&bch_register_lock);
	kobject_put(&d->kobj);
}

static void flash_dev_flush(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);

	mutex_lock(&bch_register_lock);
	bcache_device_unlink(d);
	mutex_unlock(&bch_register_lock);
	kobject_del(&d->kobj);
	continue_at(cl, flash_dev_free, system_wq);
}

static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
	struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
					  GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	closure_init(&d->cl, NULL);
	set_closure_fn(&d->cl, flash_dev_flush, system_wq);

	kobject_init(&d->kobj, &bch_flash_dev_ktype);

	if (bcache_device_init(d, block_bytes(c), u->sectors))
		goto err;

	bcache_device_attach(d, c, u - c->uuids);
	bch_sectors_dirty_init(d);
	bch_flash_dev_request_init(d);
	add_disk(d->disk);

	if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
		goto err;

	bcache_device_link(d, c, "volume");

	return 0;
err:
	kobject_put(&d->kobj);
	return -ENOMEM;
}

static int flash_devs_run(struct cache_set *c)
{
	int ret = 0;
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids && !ret;
	     u++)
		if (UUID_FLASH_ONLY(u))
			ret = flash_dev_run(c, u);

	return ret;
}

int bch_flash_dev_create(struct cache_set *c, uint64_t size)
{
	struct uuid_entry *u;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return -EINTR;

	if (!test_bit(CACHE_SET_RUNNING, &c->flags))
		return -EPERM;

	u = uuid_find_empty(c);
	if (!u) {
		pr_err("Can't create volume, no room for UUID");
		return -EINVAL;
	}

	get_random_bytes(u->uuid, 16);
	memset(u->label, 0, 32);
	u->first_reg = u->last_reg = cpu_to_le32(get_seconds());

	SET_UUID_FLASH_ONLY(u, 1);
	u->sectors = size >> 9;

	bch_uuid_write(c);

	return flash_dev_run(c, u);
}

/* Cache set */

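/*
 * Cache set lifecycle: bch_cache_set_alloc() creates the in-memory object and
 * two nested closures, c->cl (whose destructor is cache_set_free()) and
 * c->caching.  Shutdown is driven by bch_cache_set_stop() /
 * bch_cache_set_unregister(), which queue c->caching and run
 * __cache_set_unregister() -> cache_set_flush() -> cache_set_free().
 * bch_cache_set_error() is the "something went wrong, stop caching" entry
 * point used throughout the driver.
 */
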
1292__printf(2, 3)
1293bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
1294{
1295 va_list args;
1296
Kent Overstreet77c320e2013-07-11 19:42:51 -07001297 if (c->on_error != ON_ERROR_PANIC &&
1298 test_bit(CACHE_SET_STOPPING, &c->flags))
Kent Overstreetcafe5632013-03-23 16:11:31 -07001299 return false;
1300
1301 /* XXX: we can be called from atomic context
1302 acquire_console_sem();
1303 */
1304
1305 printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);
1306
1307 va_start(args, fmt);
1308 vprintk(fmt, args);
1309 va_end(args);
1310
1311 printk(", disabling caching\n");
1312
Kent Overstreet77c320e2013-07-11 19:42:51 -07001313 if (c->on_error == ON_ERROR_PANIC)
1314 panic("panic forced after error\n");
1315
Kent Overstreetcafe5632013-03-23 16:11:31 -07001316 bch_cache_set_unregister(c);
1317 return true;
1318}
1319
1320void bch_cache_set_release(struct kobject *kobj)
1321{
1322 struct cache_set *c = container_of(kobj, struct cache_set, kobj);
1323 kfree(c);
1324 module_put(THIS_MODULE);
1325}
1326
1327static void cache_set_free(struct closure *cl)
1328{
1329 struct cache_set *c = container_of(cl, struct cache_set, cl);
1330 struct cache *ca;
1331 unsigned i;
1332
1333 if (!IS_ERR_OR_NULL(c->debug))
1334 debugfs_remove(c->debug);
1335
1336 bch_open_buckets_free(c);
1337 bch_btree_cache_free(c);
1338 bch_journal_free(c);
1339
1340 for_each_cache(ca, c, i)
Slava Pestovc9a78332014-06-19 15:05:59 -07001341 if (ca) {
1342 ca->set = NULL;
1343 c->cache[ca->sb.nr_this_dev] = NULL;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001344 kobject_put(&ca->kobj);
Slava Pestovc9a78332014-06-19 15:05:59 -07001345 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001346
Kent Overstreet67539e82013-09-10 22:53:34 -07001347 bch_bset_sort_state_free(&c->sort);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001348 free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
Kent Overstreetcafe5632013-03-23 16:11:31 -07001349
Nicholas Swensonda415a02014-01-09 16:03:04 -08001350 if (c->moving_gc_wq)
1351 destroy_workqueue(c->moving_gc_wq);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001352 if (c->bio_split)
1353 bioset_free(c->bio_split);
Kent Overstreet57943512013-04-25 13:58:35 -07001354 if (c->fill_iter)
1355 mempool_destroy(c->fill_iter);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001356 if (c->bio_meta)
1357 mempool_destroy(c->bio_meta);
1358 if (c->search)
1359 mempool_destroy(c->search);
1360 kfree(c->devices);
1361
1362 mutex_lock(&bch_register_lock);
1363 list_del(&c->list);
1364 mutex_unlock(&bch_register_lock);
1365
1366 pr_info("Cache set %pU unregistered", c->sb.set_uuid);
1367 wake_up(&unregister_wait);
1368
1369 closure_debug_destroy(&c->cl);
1370 kobject_put(&c->kobj);
1371}
1372
1373static void cache_set_flush(struct closure *cl)
1374{
1375 struct cache_set *c = container_of(cl, struct cache_set, caching);
Kent Overstreet79826c32013-07-10 18:31:58 -07001376 struct cache *ca;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001377 struct btree *b;
Kent Overstreet79826c32013-07-10 18:31:58 -07001378 unsigned i;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001379
1380 bch_cache_accounting_destroy(&c->accounting);
1381
1382 kobject_put(&c->internal);
1383 kobject_del(&c->kobj);
1384
Kent Overstreet72a44512013-10-24 17:19:26 -07001385 if (c->gc_thread)
1386 kthread_stop(c->gc_thread);
1387
Kent Overstreetcafe5632013-03-23 16:11:31 -07001388 if (!IS_ERR_OR_NULL(c->root))
1389 list_add(&c->root->list, &c->btree_cache);
1390
1391 /* Should skip this if we're unregistering because of an error */
Kent Overstreet2a285682014-03-04 16:42:42 -08001392 list_for_each_entry(b, &c->btree_cache, list) {
1393 mutex_lock(&b->write_lock);
Kent Overstreetcafe5632013-03-23 16:11:31 -07001394 if (btree_node_dirty(b))
Kent Overstreet2a285682014-03-04 16:42:42 -08001395 __bch_btree_node_write(b, NULL);
1396 mutex_unlock(&b->write_lock);
1397 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001398
Kent Overstreet79826c32013-07-10 18:31:58 -07001399 for_each_cache(ca, c, i)
1400 if (ca->alloc_thread)
1401 kthread_stop(ca->alloc_thread);
1402
Kent Overstreet5b1016e2014-03-19 17:49:37 -07001403 if (c->journal.cur) {
1404 cancel_delayed_work_sync(&c->journal.work);
1405 /* flush last journal entry if needed */
1406 c->journal.work.work.func(&c->journal.work.work);
1407 }
Kent Overstreetdabb4432014-02-19 19:48:26 -08001408
Kent Overstreetcafe5632013-03-23 16:11:31 -07001409 closure_return(cl);
1410}
1411
1412static void __cache_set_unregister(struct closure *cl)
1413{
1414 struct cache_set *c = container_of(cl, struct cache_set, caching);
Kent Overstreet5caa52a2013-07-10 21:03:25 -07001415 struct cached_dev *dc;
Kent Overstreetcafe5632013-03-23 16:11:31 -07001416 size_t i;
1417
1418 mutex_lock(&bch_register_lock);
1419
Kent Overstreetcafe5632013-03-23 16:11:31 -07001420 for (i = 0; i < c->nr_uuids; i++)
Kent Overstreet5caa52a2013-07-10 21:03:25 -07001421 if (c->devices[i]) {
1422 if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
1423 test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
1424 dc = container_of(c->devices[i],
1425 struct cached_dev, disk);
1426 bch_cached_dev_detach(dc);
1427 } else {
1428 bcache_device_stop(c->devices[i]);
1429 }
1430 }
Kent Overstreetcafe5632013-03-23 16:11:31 -07001431
1432 mutex_unlock(&bch_register_lock);
1433
1434 continue_at(cl, cache_set_flush, system_wq);
1435}
1436
1437void bch_cache_set_stop(struct cache_set *c)
1438{
1439 if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
1440 closure_queue(&c->caching);
1441}
1442
1443void bch_cache_set_unregister(struct cache_set *c)
1444{
1445 set_bit(CACHE_SET_UNREGISTERING, &c->flags);
1446 bch_cache_set_stop(c);
1447}
1448
1449#define alloc_bucket_pages(gfp, c) \
1450 ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
1451
struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
	int iter_size;
	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
	if (!c)
		return NULL;

	__module_get(THIS_MODULE);
	closure_init(&c->cl, NULL);
	set_closure_fn(&c->cl, cache_set_free, system_wq);

	closure_init(&c->caching, &c->cl);
	set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

	/* Maybe create continue_at_noreturn() and use it here? */
	closure_set_stopped(&c->cl);
	closure_put(&c->cl);

	kobject_init(&c->kobj, &bch_cache_set_ktype);
	kobject_init(&c->internal, &bch_cache_set_internal_ktype);

	bch_cache_accounting_init(&c->accounting, &c->cl);

	memcpy(c->sb.set_uuid, sb->set_uuid, 16);
	c->sb.block_size = sb->block_size;
	c->sb.bucket_size = sb->bucket_size;
	c->sb.nr_in_set = sb->nr_in_set;
	c->sb.last_mount = sb->last_mount;
	c->bucket_bits = ilog2(sb->bucket_size);
	c->block_bits = ilog2(sb->block_size);
	c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);

	c->btree_pages = bucket_pages(c);
	if (c->btree_pages > BTREE_MAX_PAGES)
		c->btree_pages = max_t(int, c->btree_pages / 4,
				       BTREE_MAX_PAGES);

	sema_init(&c->sb_write_mutex, 1);
	mutex_init(&c->bucket_lock);
	init_waitqueue_head(&c->btree_cache_wait);
	init_waitqueue_head(&c->bucket_wait);
	init_waitqueue_head(&c->gc_wait);
	sema_init(&c->uuid_write_mutex, 1);

	spin_lock_init(&c->btree_gc_time.lock);
	spin_lock_init(&c->btree_split_time.lock);
	spin_lock_init(&c->btree_read_time.lock);

	bch_moving_init_cache_set(c);

	INIT_LIST_HEAD(&c->list);
	INIT_LIST_HEAD(&c->cached_devs);
	INIT_LIST_HEAD(&c->btree_cache);
	INIT_LIST_HEAD(&c->btree_cache_freeable);
	INIT_LIST_HEAD(&c->btree_cache_freed);
	INIT_LIST_HEAD(&c->data_buckets);

	c->search = mempool_create_slab_pool(32, bch_search_cache);
	if (!c->search)
		goto err;

	iter_size = (sb->bucket_size / sb->block_size + 1) *
		sizeof(struct btree_iter_set);

	if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
	    !(c->bio_meta = mempool_create_kmalloc_pool(2,
				sizeof(struct bbio) + sizeof(struct bio_vec) *
				bucket_pages(c))) ||
	    !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio),
					   BIOSET_NEED_BVECS |
					   BIOSET_NEED_RESCUER)) ||
	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
	    !(c->moving_gc_wq = alloc_workqueue("bcache_gc",
						WQ_MEM_RECLAIM, 0)) ||
	    bch_journal_alloc(c) ||
	    bch_btree_cache_alloc(c) ||
	    bch_open_buckets_alloc(c) ||
	    bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
		goto err;

	c->congested_read_threshold_us = 2000;
	c->congested_write_threshold_us = 20000;
	c->error_limit = 8 << IO_ERROR_SHIFT;

	return c;
err:
	bch_cache_set_unregister(c);
	return NULL;
}

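/*
 * Bring the cache set online once every member cache has been registered.
 * For a set that was previously in sync, read the journal, recover the btree
 * root and UUIDs, and replay the journal; otherwise invalidate any existing
 * data and write out fresh prios, UUIDs and a new btree root.
 */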
static void run_cache_set(struct cache_set *c)
{
	const char *err = "cannot allocate memory";
	struct cached_dev *dc, *t;
	struct cache *ca;
	struct closure cl;
	unsigned i;

	closure_init_stack(&cl);

	for_each_cache(ca, c, i)
		c->nbuckets += ca->sb.nbuckets;
	set_gc_sectors(c);

	if (CACHE_SYNC(&c->sb)) {
		LIST_HEAD(journal);
		struct bkey *k;
		struct jset *j;

		err = "cannot allocate memory for journal";
		if (bch_journal_read(c, &journal))
			goto err;

		pr_debug("btree_journal_read() done");

		err = "no journal entries found";
		if (list_empty(&journal))
			goto err;

		j = &list_entry(journal.prev, struct journal_replay, list)->j;

		err = "IO error reading priorities";
		for_each_cache(ca, c, i)
			prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);

		/*
		 * If prio_read() fails it'll call cache_set_error and we'll
		 * tear everything down right away; if we checked for that
		 * failure here we could avoid the pointless journal replay.
		 */

		k = &j->btree_root;

		err = "bad btree root";
		if (__bch_btree_ptr_invalid(c, k))
			goto err;

		err = "error reading btree root";
		c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		list_del_init(&c->root->list);
		rw_unlock(true, c->root);

		err = uuid_read(c, j, &cl);
		if (err)
			goto err;

		err = "error in recovery";
		if (bch_btree_check(c))
			goto err;

		bch_journal_mark(c, &journal);
		bch_initial_gc_finish(c);
		pr_debug("btree_check() done");

		/*
		 * bcache_journal_next() can't happen sooner, or
		 * btree_gc_finish() will give spurious errors about last_gc >
		 * gc_gen - this is a hack but oh well.
		 */
		bch_journal_next(&c->journal);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err;

		/*
		 * First place it's safe to allocate: btree_check() and
		 * btree_gc_finish() have to run before we have buckets to
		 * allocate, and bch_bucket_alloc_set() might cause a journal
		 * entry to be written so bcache_journal_next() has to be called
		 * first.
		 *
		 * If the uuids were in the old format we have to rewrite them
		 * before the next journal entry is written:
		 */
		if (j->version < BCACHE_JSET_VERSION_UUID)
			__uuid_write(c);

		bch_journal_replay(c, &journal);
	} else {
		pr_notice("invalidating existing data");

		for_each_cache(ca, c, i) {
			unsigned j;

			ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
					      2, SB_JOURNAL_BUCKETS);

			for (j = 0; j < ca->sb.keys; j++)
				ca->sb.d[j] = ca->sb.first_bucket + j;
		}

		bch_initial_gc_finish(c);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err;

		mutex_lock(&c->bucket_lock);
		for_each_cache(ca, c, i)
			bch_prio_write(ca);
		mutex_unlock(&c->bucket_lock);

		err = "cannot allocate new UUID bucket";
		if (__uuid_write(c))
			goto err;

		err = "cannot allocate new btree root";
		c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		mutex_lock(&c->root->write_lock);
		bkey_copy_key(&c->root->key, &MAX_KEY);
		bch_btree_node_write(c->root, &cl);
		mutex_unlock(&c->root->write_lock);

		bch_btree_set_root(c->root);
		rw_unlock(true, c->root);

		/*
		 * We don't want to write the first journal entry until
		 * everything is set up - fortunately journal entries won't be
		 * written until the SET_CACHE_SYNC() here:
		 */
		SET_CACHE_SYNC(&c->sb, true);

		bch_journal_next(&c->journal);
		bch_journal_meta(c, &cl);
	}

	err = "error starting gc thread";
	if (bch_gc_thread_start(c))
		goto err;

	closure_sync(&cl);
	c->sb.last_mount = get_seconds();
	bcache_write_super(c);

	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		bch_cached_dev_attach(dc, c);

	flash_devs_run(c);

	set_bit(CACHE_SET_RUNNING, &c->flags);
	return;
err:
	closure_sync(&cl);
	/* XXX: test this, it's broken */
	bch_cache_set_error(c, "%s", err);
}

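/* A cache may only join a set whose superblock geometry matches its own. */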
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
	return ca->sb.block_size == c->sb.block_size &&
		ca->sb.bucket_size == c->sb.bucket_size &&
		ca->sb.nr_in_set == c->sb.nr_in_set;
}

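/*
 * Add a cache to the cache set identified by its superblock's set UUID,
 * allocating the set if this is the first member seen; the set is started by
 * run_cache_set() once all nr_in_set members have been registered.
 */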
static const char *register_cache_set(struct cache *ca)
{
	char buf[12];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	list_for_each_entry(c, &bch_cache_sets, list)
		if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
			if (c->cache[ca->sb.nr_this_dev])
				return "duplicate cache set member";

			if (!can_attach_cache(ca, c))
				return "cache sb does not match set";

			if (!CACHE_SYNC(&ca->sb))
				SET_CACHE_SYNC(&c->sb, false);

			goto found;
		}

	c = bch_cache_set_alloc(&ca->sb);
	if (!c)
		return err;

	err = "error creating kobject";
	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
	    kobject_add(&c->internal, &c->kobj, "internal"))
		goto err;

	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
		goto err;

	bch_debug_init_cache_set(c);

	list_add(&c->list, &bch_cache_sets);
found:
	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
		goto err;

	if (ca->sb.seq > c->sb.seq) {
		c->sb.version = ca->sb.version;
		memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
		c->sb.flags = ca->sb.flags;
		c->sb.seq = ca->sb.seq;
		pr_debug("set version = %llu", c->sb.version);
	}

	kobject_get(&ca->kobj);
	ca->set = c;
	ca->set->cache[ca->sb.nr_this_dev] = ca;
	c->cache_by_alloc[c->caches_loaded++] = ca;

	if (c->caches_loaded == c->sb.nr_in_set)
		run_cache_set(c);

	return NULL;
err:
	bch_cache_set_unregister(c);
	return err;
}

/* Cache device */

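/*
 * kobject release callback for a struct cache: frees everything cache_alloc()
 * and register_cache() allocated and drops the block device reference.
 */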
void bch_cache_release(struct kobject *kobj)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	unsigned i;

	if (ca->set) {
		BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
		ca->set->cache[ca->sb.nr_this_dev] = NULL;
	}

	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);

	free_heap(&ca->heap);
	free_fifo(&ca->free_inc);

	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);

	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
		put_page(ca->sb_bio.bi_io_vec[0].bv_page);

	if (!IS_ERR_OR_NULL(ca->bdev))
		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	kfree(ca);
	module_put(THIS_MODULE);
}

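/*
 * Allocate the in-memory structures for a single cache device: the free
 * bucket FIFOs, the heap, the bucket array and the prio/disk bucket buffers.
 * Everything allocated here is freed by bch_cache_release().
 */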
static int cache_alloc(struct cache *ca)
{
	size_t free;
	struct bucket *b;

	__module_get(THIS_MODULE);
	kobject_init(&ca->kobj, &bch_cache_ktype);

	bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);

	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;

	if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
	    !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
	    !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
	    !(ca->buckets = vzalloc(sizeof(struct bucket) *
				    ca->sb.nbuckets)) ||
	    !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
					 2, GFP_KERNEL)) ||
	    !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)))
		return -ENOMEM;

	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

	for_each_bucket(b, ca)
		atomic_set(&b->pin, 0);

	return 0;
}

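/*
 * Register a cache device whose superblock has already been read and
 * validated.  The cache takes ownership of the superblock page and the block
 * device, then joins (or creates) its cache set under bch_register_lock.
 */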
static int register_cache(struct cache_sb *sb, struct page *sb_page,
			  struct block_device *bdev, struct cache *ca)
{
	char name[BDEVNAME_SIZE];
	const char *err = NULL; /* must be set for any error case */
	int ret = 0;

	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;

	bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
	ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
	get_page(sb_page);

	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	ret = cache_alloc(ca);
	if (ret != 0) {
		if (ret == -ENOMEM)
			err = "cache_alloc(): -ENOMEM";
		else
			err = "cache_alloc(): unknown error";
		goto err;
	}

	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
		err = "error calling kobject_add";
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&bch_register_lock);
	err = register_cache_set(ca);
	mutex_unlock(&bch_register_lock);

	if (err) {
		ret = -ENODEV;
		goto out;
	}

	pr_info("registered cache device %s", bdevname(bdev, name));

out:
	kobject_put(&ca->kobj);

err:
	if (err)
		pr_notice("error opening %s: %s", bdevname(bdev, name), err);

	return ret;
}

/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
			       const char *, size_t);

kobj_attribute_write(register, register_bcache);
kobj_attribute_write(register_quiet, register_bcache);

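/*
 * Helpers for register_bcache() to tell whether a block device is already in
 * use as a backing device or a cache; called with bch_register_lock held.
 */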
static bool bch_is_open_backing(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cached_dev *dc, *t;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			if (dc->bdev == bdev)
				return true;
	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		if (dc->bdev == bdev)
			return true;
	return false;
}

static bool bch_is_open_cache(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cache *ca;
	unsigned i;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		for_each_cache(ca, c, i)
			if (ca->bdev == bdev)
				return true;
	return false;
}

static bool bch_is_open(struct block_device *bdev)
{
	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}

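/*
 * Sysfs store handler for /sys/fs/bcache/register and register_quiet: open
 * the block device named in the buffer, read its superblock and register it
 * as either a backing device or a cache.  register_quiet suppresses the error
 * when the device turns out to be busy or already registered.
 */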
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	ssize_t ret = size;
	const char *err = "cannot allocate memory";
	char *path = NULL;
	struct cache_sb *sb = NULL;
	struct block_device *bdev = NULL;
	struct page *sb_page = NULL;

	if (!try_module_get(THIS_MODULE))
		return -EBUSY;

	if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
	    !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
		goto err;

	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY)) {
			bdev = lookup_bdev(strim(path));
			mutex_lock(&bch_register_lock);
			if (!IS_ERR(bdev) && bch_is_open(bdev))
				err = "device already registered";
			else
				err = "device busy";
			mutex_unlock(&bch_register_lock);
			if (!IS_ERR(bdev))
				bdput(bdev);
			if (attr == &ksysfs_register_quiet)
				goto out;
		}
		goto err;
	}

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto err_close;

	err = read_super(sb, bdev, &sb_page);
	if (err)
		goto err_close;

	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
		if (!dc)
			goto err_close;

		mutex_lock(&bch_register_lock);
		register_bdev(sb, sb_page, bdev, dc);
		mutex_unlock(&bch_register_lock);
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
		if (!ca)
			goto err_close;

		if (register_cache(sb, sb_page, bdev, ca) != 0)
			goto err_close;
	}
out:
	if (sb_page)
		put_page(sb_page);
	kfree(sb);
	kfree(path);
	module_put(THIS_MODULE);
	return ret;

err_close:
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
	pr_info("error opening %s: %s", path, err);
	ret = -EINVAL;
	goto out;
}

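/*
 * Reboot notifier: on shutdown, halt or power-off, stop every cache set and
 * backing device and wait up to two seconds for them to finish closing.
 */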
static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		pr_info("Stopping all devices:");

		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 2 * HZ - jiffies;

			stopped = list_empty(&bch_cache_sets) &&
				list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
			mutex_lock(&bch_register_lock);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped");
		else
			pr_notice("Timeout waiting for devices to be closed");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block reboot = {
	.notifier_call = bcache_reboot,
	.priority = INT_MAX, /* before any real devices */
};

static void bcache_exit(void)
{
	bch_debug_exit();
	bch_request_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	if (bcache_major)
		unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
}

static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
		NULL
	};

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);
	closure_debug_init();

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0) {
		unregister_reboot_notifier(&reboot);
		return bcache_major;
	}

	if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
	    sysfs_create_files(bcache_kobj, files) ||
	    bch_request_init() ||
	    bch_debug_init(bcache_kobj))
		goto err;

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}

module_exit(bcache_exit);
module_init(bcache_init);