/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

static const char bcache_magic[] = {
	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

/* Default is -1; we skip past it for struct cached_dev's cache mode */
const char * const bch_cache_modes[] = {
	"default",
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

struct uuid_entry_v0 {
	uint8_t		uuid[16];
	uint8_t		label[32];
	uint32_t	first_reg;
	uint32_t	last_reg;
	uint32_t	invalidated;
	uint32_t	pad;
};
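/*
 * uuid_entry_v0 above is the on-disk UUID entry format prior to
 * BCACHE_JSET_VERSION_UUIDv1; uuid_read() below converts these in place to
 * the current, larger struct uuid_entry.
 */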

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major, bcache_minor;
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;

#define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
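/* i.e. btree nodes are capped at 256k of data, whatever PAGE_SIZE is */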

static void bio_split_pool_free(struct bio_split_pool *p)
{
	if (p->bio_split_hook)
		mempool_destroy(p->bio_split_hook);

	if (p->bio_split)
		bioset_free(p->bio_split);
}

static int bio_split_pool_init(struct bio_split_pool *p)
{
	p->bio_split = bioset_create(4, 0);
	if (!p->bio_split)
		return -ENOMEM;

	p->bio_split_hook = mempool_create_kmalloc_pool(4,
				sizeof(struct bio_split_hook));
	if (!p->bio_split_hook)
		return -ENOMEM;

	return 0;
}
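/*
 * Note: bio_split_pool_free() tolerates a partially initialised pool (the
 * NULL checks above), so callers of bio_split_pool_init() can run their
 * normal teardown path when it fails midway.
 */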

/* Superblock */

static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
			      struct page **res)
{
	const char *err;
	struct cache_sb *s;
	struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
	unsigned i;

	if (!bh)
		return "IO error";

	s = (struct cache_sb *) bh->b_data;

	sb->offset = le64_to_cpu(s->offset);
	sb->version = le64_to_cpu(s->version);

	memcpy(sb->magic, s->magic, 16);
	memcpy(sb->uuid, s->uuid, 16);
	memcpy(sb->set_uuid, s->set_uuid, 16);
	memcpy(sb->label, s->label, SB_LABEL_SIZE);

	sb->flags = le64_to_cpu(s->flags);
	sb->seq = le64_to_cpu(s->seq);

	sb->nbuckets = le64_to_cpu(s->nbuckets);
	sb->block_size = le16_to_cpu(s->block_size);
	sb->bucket_size = le16_to_cpu(s->bucket_size);

	sb->nr_in_set = le16_to_cpu(s->nr_in_set);
	sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);
	sb->last_mount = le32_to_cpu(s->last_mount);

	sb->first_bucket = le16_to_cpu(s->first_bucket);
	sb->keys = le16_to_cpu(s->keys);

	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
		sb->d[i] = le64_to_cpu(s->d[i]);

	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
		 sb->version, sb->flags, sb->seq, sb->keys);

	err = "Not a bcache superblock";
	if (sb->offset != SB_SECTOR)
		goto err;

	if (memcmp(sb->magic, bcache_magic, 16))
		goto err;

	err = "Too many journal buckets";
	if (sb->keys > SB_JOURNAL_BUCKETS)
		goto err;

	err = "Bad checksum";
	if (s->csum != csum_set(s))
		goto err;

	err = "Bad UUID";
	if (bch_is_zero(sb->uuid, 16))
		goto err;

	err = "Unsupported superblock version";
	if (sb->version > BCACHE_SB_VERSION)
		goto err;

	err = "Bad block/bucket size";
	if (!is_power_of_2(sb->block_size) || sb->block_size > PAGE_SECTORS ||
	    !is_power_of_2(sb->bucket_size) || sb->bucket_size < PAGE_SECTORS)
		goto err;

	err = "Too many buckets";
	if (sb->nbuckets > LONG_MAX)
		goto err;

	err = "Not enough buckets";
	if (sb->nbuckets < 1 << 7)
		goto err;

	err = "Invalid superblock: device too small";
	if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
		goto err;

	if (sb->version == CACHE_BACKING_DEV)
		goto out;

	err = "Bad UUID";
	if (bch_is_zero(sb->set_uuid, 16))
		goto err;

	err = "Bad cache device number in set";
	if (!sb->nr_in_set ||
	    sb->nr_in_set <= sb->nr_this_dev ||
	    sb->nr_in_set > MAX_CACHES_PER_SET)
		goto err;

	err = "Journal buckets not sequential";
	for (i = 0; i < sb->keys; i++)
		if (sb->d[i] != sb->first_bucket + i)
			goto err;

	err = "Too many journal buckets";
	if (sb->first_bucket + sb->keys > sb->nbuckets)
		goto err;

	err = "Invalid superblock: first bucket comes before end of super";
	if (sb->first_bucket * sb->bucket_size < 16)
		goto err;
out:
	sb->last_mount = get_seconds();
	err = NULL;

	get_page(bh->b_page);
	*res = bh->b_page;
err:
	put_bh(bh);
	return err;
}

static void write_bdev_super_endio(struct bio *bio, int error)
{
	struct cached_dev *dc = bio->bi_private;
	/* XXX: error checking */

	closure_put(&dc->sb_write.cl);
}

static void __write_super(struct cache_sb *sb, struct bio *bio)
{
	struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
	unsigned i;

	bio->bi_sector = SB_SECTOR;
	bio->bi_rw = REQ_SYNC|REQ_META;
	bio->bi_size = SB_SIZE;
	bch_bio_map(bio, NULL);

	out->offset = cpu_to_le64(sb->offset);
	out->version = cpu_to_le64(sb->version);

	memcpy(out->uuid, sb->uuid, 16);
	memcpy(out->set_uuid, sb->set_uuid, 16);
	memcpy(out->label, sb->label, SB_LABEL_SIZE);

	out->flags = cpu_to_le64(sb->flags);
	out->seq = cpu_to_le64(sb->seq);

	out->last_mount = cpu_to_le32(sb->last_mount);
	out->first_bucket = cpu_to_le16(sb->first_bucket);
	out->keys = cpu_to_le16(sb->keys);

	for (i = 0; i < sb->keys; i++)
		out->d[i] = cpu_to_le64(sb->d[i]);

	out->csum = csum_set(out);

	pr_debug("ver %llu, flags %llu, seq %llu",
		 sb->version, sb->flags, sb->seq);

	submit_bio(REQ_WRITE, bio);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
	struct closure *cl = &dc->sb_write.cl;
	struct bio *bio = &dc->sb_bio;

	closure_lock(&dc->sb_write, parent);

	bio_reset(bio);
	bio->bi_bdev = dc->bdev;
	bio->bi_end_io = write_bdev_super_endio;
	bio->bi_private = dc;

	closure_get(cl);
	__write_super(&dc->sb, bio);

	closure_return(cl);
}

static void write_super_endio(struct bio *bio, int error)
{
	struct cache *ca = bio->bi_private;

	bch_count_io_errors(ca, error, "writing superblock");
	closure_put(&ca->set->sb_write.cl);
}

void bcache_write_super(struct cache_set *c)
{
	struct closure *cl = &c->sb_write.cl;
	struct cache *ca;
	unsigned i;

	closure_lock(&c->sb_write, &c->cl);

	c->sb.seq++;

	for_each_cache(ca, c, i) {
		struct bio *bio = &ca->sb_bio;

		ca->sb.version = BCACHE_SB_VERSION;
		ca->sb.seq = c->sb.seq;
		ca->sb.last_mount = c->sb.last_mount;

		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

		bio_reset(bio);
		bio->bi_bdev = ca->bdev;
		bio->bi_end_io = write_super_endio;
		bio->bi_private = ca;

		closure_get(cl);
		__write_super(&ca->sb, bio);
	}

	closure_return(cl);
}

/* UUID io */

static void uuid_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write.cl);

	cache_set_err_on(error, c, "accessing uuids");
	bch_bbio_free(bio, c);
	closure_put(cl);
}

static void uuid_io(struct cache_set *c, unsigned long rw,
		    struct bkey *k, struct closure *parent)
{
	struct closure *cl = &c->uuid_write.cl;
	struct uuid_entry *u;
	unsigned i;

	BUG_ON(!parent);
	closure_lock(&c->uuid_write, parent);

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bio *bio = bch_bbio_alloc(c);

		bio->bi_rw = REQ_SYNC|REQ_META|rw;
		bio->bi_size = KEY_SIZE(k) << 9;

		bio->bi_end_io = uuid_endio;
		bio->bi_private = cl;
		bch_bio_map(bio, c->uuids);

		bch_submit_bbio(bio, c, k, i);

		if (!(rw & WRITE))
			break;
	}

	pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read",
		 pkey(&c->uuid_bucket));

	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
		if (!bch_is_zero(u->uuid, 16))
			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
				 u - c->uuids, u->uuid, u->label,
				 u->first_reg, u->last_reg, u->invalidated);

	closure_return(cl);
}

static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
	struct bkey *k = &j->uuid_bucket;

	if (__bch_ptr_invalid(c, 1, k))
		return "bad uuid pointer";

	bkey_copy(&c->uuid_bucket, k);
	uuid_io(c, READ_SYNC, k, cl);

	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
		struct uuid_entry_v0 *u0 = (void *) c->uuids;
		struct uuid_entry *u1 = (void *) c->uuids;
		int i;

		closure_sync(cl);

		/*
		 * Since the new uuid entry is bigger than the old, we have to
		 * convert starting at the highest memory address and work down
		 * in order to do it in place
		 */

		for (i = c->nr_uuids - 1; i >= 0; --i) {
			memcpy(u1[i].uuid, u0[i].uuid, 16);
			memcpy(u1[i].label, u0[i].label, 32);

			u1[i].first_reg = u0[i].first_reg;
			u1[i].last_reg = u0[i].last_reg;
			u1[i].invalidated = u0[i].invalidated;

			u1[i].flags = 0;
			u1[i].sectors = 0;
		}
	}

	return NULL;
}

static int __uuid_write(struct cache_set *c)
{
	BKEY_PADDED(key) k;
	struct closure cl;
	closure_init_stack(&cl);

	lockdep_assert_held(&bch_register_lock);

	if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, &cl))
		return 1;

	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
	uuid_io(c, REQ_WRITE, &k.key, &cl);
	closure_sync(&cl);

	bkey_copy(&c->uuid_bucket, &k.key);
	__bkey_put(c, &k.key);
	return 0;
}

int bch_uuid_write(struct cache_set *c)
{
	int ret = __uuid_write(c);

	if (!ret)
		bch_journal_meta(c, NULL);

	return ret;
}

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids; u++)
		if (!memcmp(u->uuid, uuid, 16))
			return u;

	return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
	static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
	return uuid_find(c, zero_uuid);
}

/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *	8 bit gen
 *	16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * LRU (and, in the future, other) cache replacement policies; for most
 * purposes it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other,
 * and it's actually the gens that must be written out at specific times - it's
 * no big deal if the priorities don't get written; if we lose them we just
 * reuse buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, in as many buckets as are required
 * to fit them all. The buckets we use to store them form a list; the journal
 * header points to the first bucket, the first bucket points to the second
 * bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */
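/*
 * A sketch of the resulting on-disk layout (illustrative, not a format
 * definition): each prio bucket holds prios_per_bucket(ca) packed
 * {gen, prio} pairs, so the whole array takes prio_buckets(ca) buckets,
 * chained through prio_set->next_bucket:
 *
 *	journal header -> prio bucket 0 -> prio bucket 1 -> ... -> last
 *
 * bch_prio_write() below fills the chain back to front, so each bucket can
 * record where its successor lives; prio_read() walks it front to back.
 */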

static void prio_endio(struct bio *bio, int error)
{
	struct cache *ca = bio->bi_private;

	cache_set_err_on(error, ca->set, "accessing priorities");
	bch_bbio_free(bio, ca->set);
	closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
{
	struct closure *cl = &ca->prio;
	struct bio *bio = bch_bbio_alloc(ca->set);

	closure_init_stack(cl);

	bio->bi_sector = bucket * ca->sb.bucket_size;
	bio->bi_bdev = ca->bdev;
	bio->bi_rw = REQ_SYNC|REQ_META|rw;
	bio->bi_size = bucket_bytes(ca);

	bio->bi_end_io = prio_endio;
	bio->bi_private = ca;
	bch_bio_map(bio, ca->disk_buckets);

	closure_bio_submit(bio, &ca->prio, ca);
	closure_sync(cl);
}

#define buckets_free(c)	"free %zu, free_inc %zu, unused %zu", \
	fifo_used(&c->free), fifo_used(&c->free_inc), fifo_used(&c->unused)
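/*
 * buckets_free() expands to a format string plus its arguments, so it can
 * be spliced directly into a printf-style call - see the blktrace_msg() in
 * bch_prio_write() below.
 */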

void bch_prio_write(struct cache *ca)
{
	int i;
	struct bucket *b;
	struct closure cl;

	closure_init_stack(&cl);

	lockdep_assert_held(&ca->set->bucket_lock);

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets; b++)
		b->disk_gen = b->gen;

	ca->disk_buckets->seq++;

	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
			&ca->meta_sectors_written);

	pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
		 fifo_used(&ca->free_inc), fifo_used(&ca->unused));
	blktrace_msg(ca, "Starting priorities: " buckets_free(ca));

	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
		long bucket;
		struct prio_set *p = ca->disk_buckets;
		struct bucket_disk *d = p->data;
		struct bucket_disk *end = d + prios_per_bucket(ca);

		for (b = ca->buckets + i * prios_per_bucket(ca);
		     b < ca->buckets + ca->sb.nbuckets && d < end;
		     b++, d++) {
			d->prio = cpu_to_le16(b->prio);
			d->gen = b->gen;
		}

		p->next_bucket = ca->prio_buckets[i + 1];
		p->magic = pset_magic(ca);
		p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);

		bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, &cl);
		BUG_ON(bucket == -1);

		mutex_unlock(&ca->set->bucket_lock);
		prio_io(ca, bucket, REQ_WRITE);
		mutex_lock(&ca->set->bucket_lock);

		ca->prio_buckets[i] = bucket;
		atomic_dec_bug(&ca->buckets[bucket].pin);
	}

	mutex_unlock(&ca->set->bucket_lock);

	bch_journal_meta(ca->set, &cl);
	closure_sync(&cl);

	mutex_lock(&ca->set->bucket_lock);

	ca->need_save_prio = 0;

	/*
	 * Don't want the old priorities to get garbage collected until after we
	 * finish writing the new ones, and they're journalled
	 */
	for (i = 0; i < prio_buckets(ca); i++)
		ca->prio_last_buckets[i] = ca->prio_buckets[i];
}

static void prio_read(struct cache *ca, uint64_t bucket)
{
	struct prio_set *p = ca->disk_buckets;
	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
	struct bucket *b;
	unsigned bucket_nr = 0;

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets;
	     b++, d++) {
		if (d == end) {
			ca->prio_buckets[bucket_nr] = bucket;
			ca->prio_last_buckets[bucket_nr] = bucket;
			bucket_nr++;

			prio_io(ca, bucket, READ_SYNC);

			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
				pr_warn("bad csum reading priorities");

			if (p->magic != pset_magic(ca))
				pr_warn("bad magic reading priorities");

			bucket = p->next_bucket;
			d = p->data;
		}

		b->prio = le16_to_cpu(d->prio);
		b->gen = b->disk_gen = b->last_gc = b->gc_gen = d->gen;
	}
}

/* Bcache device */

static int open_dev(struct block_device *b, fmode_t mode)
{
	struct bcache_device *d = b->bd_disk->private_data;

	if (atomic_read(&d->closing))
		return -ENXIO;

	closure_get(&d->cl);
	return 0;
}

static int release_dev(struct gendisk *b, fmode_t mode)
{
	struct bcache_device *d = b->private_data;

	closure_put(&d->cl);
	return 0;
}

static int ioctl_dev(struct block_device *b, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct bcache_device *d = b->bd_disk->private_data;

	return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_ops = {
	.open		= open_dev,
	.release	= release_dev,
	.ioctl		= ioctl_dev,
	.owner		= THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{
	if (!atomic_xchg(&d->closing, 1))
		closure_queue(&d->cl);
}
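/*
 * Once d->closing is set, open_dev() above refuses new opens with -ENXIO;
 * the device is actually torn down when the last closure reference is
 * dropped.
 */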

static void bcache_device_detach(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (atomic_read(&d->detaching)) {
		struct uuid_entry *u = d->c->uuids + d->id;

		SET_UUID_FLASH_ONLY(u, 0);
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		bch_uuid_write(d->c);

		atomic_set(&d->detaching, 0);
	}

	d->c->devices[d->id] = NULL;
	closure_put(&d->c->caching);
	d->c = NULL;
}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
				 unsigned id)
{
	BUG_ON(test_bit(CACHE_SET_STOPPING, &c->flags));

	d->id = id;
	d->c = c;
	c->devices[id] = d;

	closure_get(&c->caching);
}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
			       const char *name)
{
	snprintf(d->name, BCACHEDEVNAME_SIZE, "%s%u", name, d->id);

	WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
	     sysfs_create_link(&c->kobj, &d->kobj, d->name),
	     "Couldn't create device <-> cache set symlinks");
}

static void bcache_device_free(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	pr_info("%s stopped", d->disk->disk_name);

	if (d->c)
		bcache_device_detach(d);

	if (d->disk)
		del_gendisk(d->disk);
	if (d->disk && d->disk->queue)
		blk_cleanup_queue(d->disk->queue);
	if (d->disk)
		put_disk(d->disk);

	bio_split_pool_free(&d->bio_split_hook);
	if (d->unaligned_bvec)
		mempool_destroy(d->unaligned_bvec);
	if (d->bio_split)
		bioset_free(d->bio_split);

	closure_debug_destroy(&d->cl);
}

static int bcache_device_init(struct bcache_device *d, unsigned block_size)
{
	struct request_queue *q;

	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
	    !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
				sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
	    bio_split_pool_init(&d->bio_split_hook))
		return -ENOMEM;

	d->disk = alloc_disk(1);
	if (!d->disk)
		return -ENOMEM;

	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);

	d->disk->major = bcache_major;
	d->disk->first_minor = bcache_minor++;
	d->disk->fops = &bcache_ops;
	d->disk->private_data = d;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	blk_queue_make_request(q, NULL);
	d->disk->queue = q;
	q->queuedata = d;
	q->backing_dev_info.congested_data = d;
	q->limits.max_hw_sectors = UINT_MAX;
	q->limits.max_sectors = UINT_MAX;
	q->limits.max_segment_size = UINT_MAX;
	q->limits.max_segments = BIO_MAX_PAGES;
	q->limits.max_discard_sectors = UINT_MAX;
	q->limits.io_min = block_size;
	q->limits.logical_block_size = block_size;
	q->limits.physical_block_size = block_size;
	set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
	set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);

	return 0;
}

/* Cached device */

static void calc_cached_dev_sectors(struct cache_set *c)
{
	uint64_t sectors = 0;
	struct cached_dev *dc;

	list_for_each_entry(dc, &c->cached_devs, list)
		sectors += bdev_sectors(dc->bdev);

	c->cached_dev_sectors = sectors;
}

void bch_cached_dev_run(struct cached_dev *dc)
{
	struct bcache_device *d = &dc->disk;

	if (atomic_xchg(&dc->running, 1))
		return;

	if (!d->c &&
	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
		struct closure cl;

		closure_init_stack(&cl);

		SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	}

	add_disk(d->disk);
#if 0
	char *env[] = { "SYMLINK=label", NULL };
	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
#endif
	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
	    sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
		pr_debug("error creating sysfs link");
}

static void cached_dev_detach_finish(struct work_struct *w)
{
	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
	char buf[BDEVNAME_SIZE];
	struct closure cl;

	closure_init_stack(&cl);

	BUG_ON(!atomic_read(&dc->disk.detaching));
	BUG_ON(atomic_read(&dc->count));

	sysfs_remove_link(&dc->disk.c->kobj, dc->disk.name);
	sysfs_remove_link(&dc->disk.kobj, "cache");

	mutex_lock(&bch_register_lock);

	memset(&dc->sb.set_uuid, 0, 16);
	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

	bch_write_bdev_super(dc, &cl);
	closure_sync(&cl);

	bcache_device_detach(&dc->disk);
	list_move(&dc->list, &uncached_devices);

	mutex_unlock(&bch_register_lock);

	pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));

	/* Drop ref we took in cached_dev_detach() */
	closure_put(&dc->disk.cl);
}

void bch_cached_dev_detach(struct cached_dev *dc)
{
	lockdep_assert_held(&bch_register_lock);

	if (atomic_read(&dc->disk.closing))
		return;

	if (atomic_xchg(&dc->disk.detaching, 1))
		return;

	/*
	 * Block the device from being closed and freed until we're finished
	 * detaching
	 */
	closure_get(&dc->disk.cl);

	bch_writeback_queue(dc);
	cached_dev_put(dc);
}

int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
{
	uint32_t rtime = cpu_to_le32(get_seconds());
	struct uuid_entry *u;
	char buf[BDEVNAME_SIZE];

	bdevname(dc->bdev, buf);

	if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
		return -ENOENT;

	if (dc->disk.c) {
		pr_err("Can't attach %s: already attached", buf);
		return -EINVAL;
	}

	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
		pr_err("Can't attach %s: shutting down", buf);
		return -EINVAL;
	}

	if (dc->sb.block_size < c->sb.block_size) {
		/* Will die */
		pr_err("Couldn't attach %s: block size less than set's block size",
		       buf);
		return -EINVAL;
	}

	u = uuid_find(c, dc->sb.uuid);

	if (u &&
	    (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
	     BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		u = NULL;
	}

	if (!u) {
		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
			pr_err("Couldn't find uuid for %s in set", buf);
			return -ENOENT;
		}

		u = uuid_find_empty(c);
		if (!u) {
			pr_err("Not caching %s, no room for UUID", buf);
			return -EINVAL;
		}
	}

	/* Deadlocks since we're called via sysfs...
	sysfs_remove_file(&dc->kobj, &sysfs_attach);
	*/

	if (bch_is_zero(u->uuid, 16)) {
		struct closure cl;

		closure_init_stack(&cl);

		memcpy(u->uuid, dc->sb.uuid, 16);
		memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
		u->first_reg = u->last_reg = rtime;
		bch_uuid_write(c);

		memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	} else {
		u->last_reg = rtime;
		bch_uuid_write(c);
	}

	bcache_device_attach(&dc->disk, c, u - c->uuids);
	bcache_device_link(&dc->disk, c, "bdev");
	list_move(&dc->list, &c->cached_devs);
	calc_cached_dev_sectors(c);

	smp_wmb();
	/*
	 * dc->c must be set before dc->count != 0 - paired with the mb in
	 * cached_dev_get()
	 */
	atomic_set(&dc->count, 1);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
		atomic_set(&dc->has_dirty, 1);
		atomic_inc(&dc->count);
		bch_writeback_queue(dc);
	}

	bch_cached_dev_run(dc);

	pr_info("Caching %s as %s on set %pU",
		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
		dc->disk.c->sb.set_uuid);
	return 0;
}
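/*
 * A note on the ordering at the end of bch_cached_dev_attach():
 * bcache_device_attach() sets dc->disk.c, the smp_wmb() publishes it, and
 * only then does dc->count become nonzero. cached_dev_get() issues the
 * matching barrier, so a reader that sees a nonzero count also sees dc->c.
 */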

void bch_cached_dev_release(struct kobject *kobj)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	kfree(dc);
	module_put(THIS_MODULE);
}

static void cached_dev_free(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

	cancel_delayed_work_sync(&dc->writeback_rate_update);

	mutex_lock(&bch_register_lock);

	bcache_device_free(&dc->disk);
	list_del(&dc->list);

	mutex_unlock(&bch_register_lock);

	if (!IS_ERR_OR_NULL(dc->bdev)) {
		blk_sync_queue(bdev_get_queue(dc->bdev));
		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}

	wake_up(&unregister_wait);

	kobject_put(&dc->disk.kobj);
}

static void cached_dev_flush(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
	struct bcache_device *d = &dc->disk;

	bch_cache_accounting_destroy(&dc->accounting);
	kobject_del(&d->kobj);

	continue_at(cl, cached_dev_free, system_wq);
}

static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
{
	int err;
	struct io *io;

	closure_init(&dc->disk.cl, NULL);
	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);

	__module_get(THIS_MODULE);
	INIT_LIST_HEAD(&dc->list);
	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);

	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

	err = bcache_device_init(&dc->disk, block_size);
	if (err)
		goto err;

	spin_lock_init(&dc->io_lock);
	closure_init_unlocked(&dc->sb_write);
	INIT_WORK(&dc->detach, cached_dev_detach_finish);

	dc->sequential_merge = true;
	dc->sequential_cutoff = 4 << 20;

	INIT_LIST_HEAD(&dc->io_lru);
	dc->sb_bio.bi_max_vecs = 1;
	dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;

	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
		list_add(&io->lru, &dc->io_lru);
		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
	}

	bch_writeback_init_cached_dev(dc);
	return 0;
err:
	bcache_device_stop(&dc->disk);
	return err;
}

/* Cached device - bcache superblock */

static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
				 struct block_device *bdev,
				 struct cached_dev *dc)
{
	char name[BDEVNAME_SIZE];
	const char *err = "cannot allocate memory";
	struct gendisk *g;
	struct cache_set *c;

	if (!dc || cached_dev_init(dc, sb->block_size << 9) != 0)
		return err;

	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
	dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
	dc->bdev = bdev;
	dc->bdev->bd_holder = dc;

	g = dc->disk.disk;

	set_capacity(g, dc->bdev->bd_part->nr_sects - 16);

	bch_cached_dev_request_init(dc);

	err = "error creating kobject";
	if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
			"bcache"))
		goto err;
	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
		goto err;

	list_add(&dc->list, &uncached_devices);
	list_for_each_entry(c, &bch_cache_sets, list)
		bch_cached_dev_attach(dc, c);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
		bch_cached_dev_run(dc);

	return NULL;
err:
	kobject_put(&dc->disk.kobj);
	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
	/*
	 * Return NULL instead of an error because kobject_put() cleans
	 * everything up
	 */
	return NULL;
}

/* Flash only volumes */

void bch_flash_dev_release(struct kobject *kobj)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	kfree(d);
}

static void flash_dev_free(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);

	bcache_device_free(d);
	kobject_put(&d->kobj);
}

static void flash_dev_flush(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);

	sysfs_remove_link(&d->c->kobj, d->name);
	sysfs_remove_link(&d->kobj, "cache");
	kobject_del(&d->kobj);
	continue_at(cl, flash_dev_free, system_wq);
}

static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
	struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
					  GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	closure_init(&d->cl, NULL);
	set_closure_fn(&d->cl, flash_dev_flush, system_wq);

	kobject_init(&d->kobj, &bch_flash_dev_ktype);

	if (bcache_device_init(d, block_bytes(c)))
		goto err;

	bcache_device_attach(d, c, u - c->uuids);
	set_capacity(d->disk, u->sectors);
	bch_flash_dev_request_init(d);
	add_disk(d->disk);

	if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
		goto err;

	bcache_device_link(d, c, "volume");

	return 0;
err:
	kobject_put(&d->kobj);
	return -ENOMEM;
}

static int flash_devs_run(struct cache_set *c)
{
	int ret = 0;
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids && !ret;
	     u++)
		if (UUID_FLASH_ONLY(u))
			ret = flash_dev_run(c, u);

	return ret;
}

int bch_flash_dev_create(struct cache_set *c, uint64_t size)
{
	struct uuid_entry *u;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return -EINTR;

	u = uuid_find_empty(c);
	if (!u) {
		pr_err("Can't create volume, no room for UUID");
		return -EINVAL;
	}

	get_random_bytes(u->uuid, 16);
	memset(u->label, 0, 32);
	u->first_reg = u->last_reg = cpu_to_le32(get_seconds());

	SET_UUID_FLASH_ONLY(u, 1);
	u->sectors = size >> 9;

	bch_uuid_write(c);

	return flash_dev_run(c, u);
}

/* Cache set */

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
	va_list args;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return false;

	/* XXX: we can be called from atomic context
	acquire_console_sem();
	*/

	printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	printk(", disabling caching\n");

	bch_cache_set_unregister(c);
	return true;
}

void bch_cache_set_release(struct kobject *kobj)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	kfree(c);
	module_put(THIS_MODULE);
}

static void cache_set_free(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, cl);
	struct cache *ca;
	unsigned i;

	if (!IS_ERR_OR_NULL(c->debug))
		debugfs_remove(c->debug);

	bch_open_buckets_free(c);
	bch_btree_cache_free(c);
	bch_journal_free(c);

	for_each_cache(ca, c, i)
		if (ca)
			kobject_put(&ca->kobj);

	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
	free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));

	kfree(c->fill_iter);
	if (c->bio_split)
		bioset_free(c->bio_split);
	if (c->bio_meta)
		mempool_destroy(c->bio_meta);
	if (c->search)
		mempool_destroy(c->search);
	kfree(c->devices);

	mutex_lock(&bch_register_lock);
	list_del(&c->list);
	mutex_unlock(&bch_register_lock);

	pr_info("Cache set %pU unregistered", c->sb.set_uuid);
	wake_up(&unregister_wait);

	closure_debug_destroy(&c->cl);
	kobject_put(&c->kobj);
}

static void cache_set_flush(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct btree *b;

	/* Shut down allocator threads */
	set_bit(CACHE_SET_STOPPING_2, &c->flags);
	wake_up(&c->alloc_wait);

	bch_cache_accounting_destroy(&c->accounting);

	kobject_put(&c->internal);
	kobject_del(&c->kobj);

	if (!IS_ERR_OR_NULL(c->root))
		list_add(&c->root->list, &c->btree_cache);

	/* Should skip this if we're unregistering because of an error */
	list_for_each_entry(b, &c->btree_cache, list)
		if (btree_node_dirty(b))
			bch_btree_write(b, true, NULL);

	closure_return(cl);
}

static void __cache_set_unregister(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cached_dev *dc, *t;
	size_t i;

	mutex_lock(&bch_register_lock);

	if (test_bit(CACHE_SET_UNREGISTERING, &c->flags))
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			bch_cached_dev_detach(dc);

	for (i = 0; i < c->nr_uuids; i++)
		if (c->devices[i] && UUID_FLASH_ONLY(&c->uuids[i]))
			bcache_device_stop(c->devices[i]);

	mutex_unlock(&bch_register_lock);

	continue_at(cl, cache_set_flush, system_wq);
}

void bch_cache_set_stop(struct cache_set *c)
{
	if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
		closure_queue(&c->caching);
}

void bch_cache_set_unregister(struct cache_set *c)
{
	set_bit(CACHE_SET_UNREGISTERING, &c->flags);
	bch_cache_set_stop(c);
}

#define alloc_bucket_pages(gfp, c)					\
	((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
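/*
 * Allocates one bucket's worth of zeroed pages; everything allocated with
 * this macro (c->sort, c->uuids, ca->disk_buckets) is freed again with
 * free_pages() in cache_set_free() and bch_cache_release().
 */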

struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
	int iter_size;
	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);

	if (!c)
		return NULL;

	__module_get(THIS_MODULE);
	closure_init(&c->cl, NULL);
	set_closure_fn(&c->cl, cache_set_free, system_wq);

	closure_init(&c->caching, &c->cl);
	set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

	/* Maybe create continue_at_noreturn() and use it here? */
	closure_set_stopped(&c->cl);
	closure_put(&c->cl);

	kobject_init(&c->kobj, &bch_cache_set_ktype);
	kobject_init(&c->internal, &bch_cache_set_internal_ktype);

	bch_cache_accounting_init(&c->accounting, &c->cl);

	memcpy(c->sb.set_uuid, sb->set_uuid, 16);
	c->sb.block_size = sb->block_size;
	c->sb.bucket_size = sb->bucket_size;
	c->sb.nr_in_set = sb->nr_in_set;
	c->sb.last_mount = sb->last_mount;
	c->bucket_bits = ilog2(sb->bucket_size);
	c->block_bits = ilog2(sb->block_size);
	c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);

	c->btree_pages = c->sb.bucket_size / PAGE_SECTORS;
	if (c->btree_pages > BTREE_MAX_PAGES)
		c->btree_pages = max_t(int, c->btree_pages / 4,
				       BTREE_MAX_PAGES);

	init_waitqueue_head(&c->alloc_wait);
	mutex_init(&c->bucket_lock);
	mutex_init(&c->fill_lock);
	mutex_init(&c->sort_lock);
	spin_lock_init(&c->sort_time_lock);
	closure_init_unlocked(&c->sb_write);
	closure_init_unlocked(&c->uuid_write);
	spin_lock_init(&c->btree_read_time_lock);
	bch_moving_init_cache_set(c);

	INIT_LIST_HEAD(&c->list);
	INIT_LIST_HEAD(&c->cached_devs);
	INIT_LIST_HEAD(&c->btree_cache);
	INIT_LIST_HEAD(&c->btree_cache_freeable);
	INIT_LIST_HEAD(&c->btree_cache_freed);
	INIT_LIST_HEAD(&c->data_buckets);

	c->search = mempool_create_slab_pool(32, bch_search_cache);
	if (!c->search)
		goto err;

	iter_size = (sb->bucket_size / sb->block_size + 1) *
		sizeof(struct btree_iter_set);

	if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
	    !(c->bio_meta = mempool_create_kmalloc_pool(2,
				sizeof(struct bbio) + sizeof(struct bio_vec) *
				bucket_pages(c))) ||
	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
	    !(c->fill_iter = kmalloc(iter_size, GFP_KERNEL)) ||
	    !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||
	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
	    bch_journal_alloc(c) ||
	    bch_btree_cache_alloc(c) ||
	    bch_open_buckets_alloc(c))
		goto err;

	c->fill_iter->size = sb->bucket_size / sb->block_size;

	c->congested_read_threshold_us = 2000;
	c->congested_write_threshold_us = 20000;
	c->error_limit = 8 << IO_ERROR_SHIFT;

	return c;
err:
	bch_cache_set_unregister(c);
	return NULL;
}

static void run_cache_set(struct cache_set *c)
{
	const char *err = "cannot allocate memory";
	struct cached_dev *dc, *t;
	struct cache *ca;
	unsigned i;

	struct btree_op op;
	bch_btree_op_init_stack(&op);
	op.lock = SHRT_MAX;

	for_each_cache(ca, c, i)
		c->nbuckets += ca->sb.nbuckets;

	if (CACHE_SYNC(&c->sb)) {
		LIST_HEAD(journal);
		struct bkey *k;
		struct jset *j;

		err = "cannot allocate memory for journal";
		if (bch_journal_read(c, &journal, &op))
			goto err;

		pr_debug("btree_journal_read() done");

		err = "no journal entries found";
		if (list_empty(&journal))
			goto err;

		j = &list_entry(journal.prev, struct journal_replay, list)->j;

		err = "IO error reading priorities";
		for_each_cache(ca, c, i)
			prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);

		/*
		 * If prio_read() fails it'll call cache_set_error and we'll
		 * tear everything down right away, but if we checked sooner
		 * we could perhaps avoid the journal replay.
		 */

		k = &j->btree_root;

		err = "bad btree root";
		if (__bch_ptr_invalid(c, j->btree_level + 1, k))
			goto err;

		err = "error reading btree root";
		c->root = bch_btree_node_get(c, k, j->btree_level, &op);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		list_del_init(&c->root->list);
		rw_unlock(true, c->root);

		err = uuid_read(c, j, &op.cl);
		if (err)
			goto err;

		err = "error in recovery";
		if (bch_btree_check(c, &op))
			goto err;

		bch_journal_mark(c, &journal);
		bch_btree_gc_finish(c);
		pr_debug("btree_check() done");

		/*
		 * bcache_journal_next() can't happen sooner, or
		 * btree_gc_finish() will give spurious errors about last_gc >
		 * gc_gen - this is a hack but oh well.
		 */
		bch_journal_next(&c->journal);

		for_each_cache(ca, c, i)
			closure_call(&ca->alloc, bch_allocator_thread,
				     system_wq, &c->cl);

		/*
		 * First place it's safe to allocate: btree_check() and
		 * btree_gc_finish() have to run before we have buckets to
		 * allocate, and bch_bucket_alloc_set() might cause a journal
		 * entry to be written so bcache_journal_next() has to be called
		 * first.
		 *
		 * If the uuids were in the old format we have to rewrite them
		 * before the next journal entry is written:
		 */
		if (j->version < BCACHE_JSET_VERSION_UUID)
			__uuid_write(c);

		bch_journal_replay(c, &journal, &op);
	} else {
		pr_notice("invalidating existing data");
		/* Don't want invalidate_buckets() to queue a gc yet */
		closure_lock(&c->gc, NULL);

		for_each_cache(ca, c, i) {
			unsigned j;

			ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
					      2, SB_JOURNAL_BUCKETS);

			for (j = 0; j < ca->sb.keys; j++)
				ca->sb.d[j] = ca->sb.first_bucket + j;
		}

		bch_btree_gc_finish(c);

		for_each_cache(ca, c, i)
			closure_call(&ca->alloc, bch_allocator_thread,
				     ca->alloc_workqueue, &c->cl);

		mutex_lock(&c->bucket_lock);
		for_each_cache(ca, c, i)
			bch_prio_write(ca);
		mutex_unlock(&c->bucket_lock);

		wake_up(&c->alloc_wait);

		err = "cannot allocate new UUID bucket";
		if (__uuid_write(c))
			goto err_unlock_gc;

		err = "cannot allocate new btree root";
		c->root = bch_btree_node_alloc(c, 0, &op.cl);
		if (IS_ERR_OR_NULL(c->root))
			goto err_unlock_gc;

		bkey_copy_key(&c->root->key, &MAX_KEY);
		bch_btree_write(c->root, true, &op);

		bch_btree_set_root(c->root);
		rw_unlock(true, c->root);

		/*
		 * We don't want to write the first journal entry until
		 * everything is set up - fortunately journal entries won't be
		 * written until the SET_CACHE_SYNC() here:
		 */
		SET_CACHE_SYNC(&c->sb, true);

		bch_journal_next(&c->journal);
		bch_journal_meta(c, &op.cl);

		/* Unlock */
		closure_set_stopped(&c->gc.cl);
		closure_put(&c->gc.cl);
	}

	closure_sync(&op.cl);
	c->sb.last_mount = get_seconds();
	bcache_write_super(c);

	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		bch_cached_dev_attach(dc, c);

	flash_devs_run(c);

	return;
err_unlock_gc:
	closure_set_stopped(&c->gc.cl);
	closure_put(&c->gc.cl);
err:
	closure_sync(&op.cl);
	/* XXX: test this, it's broken */
	bch_cache_set_error(c, err);
}

static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
	return ca->sb.block_size == c->sb.block_size &&
	       ca->sb.bucket_size == c->sb.bucket_size &&
	       ca->sb.nr_in_set == c->sb.nr_in_set;
}

static const char *register_cache_set(struct cache *ca)
{
	char buf[12];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	list_for_each_entry(c, &bch_cache_sets, list)
		if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
			if (c->cache[ca->sb.nr_this_dev])
				return "duplicate cache set member";

			if (!can_attach_cache(ca, c))
				return "cache sb does not match set";

			if (!CACHE_SYNC(&ca->sb))
				SET_CACHE_SYNC(&c->sb, false);

			goto found;
		}

	c = bch_cache_set_alloc(&ca->sb);
	if (!c)
		return err;

	err = "error creating kobject";
	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
	    kobject_add(&c->internal, &c->kobj, "internal"))
		goto err;

	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
		goto err;

	bch_debug_init_cache_set(c);

	list_add(&c->list, &bch_cache_sets);
found:
	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
		goto err;

	if (ca->sb.seq > c->sb.seq) {
		c->sb.version = ca->sb.version;
		memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
		c->sb.flags = ca->sb.flags;
		c->sb.seq = ca->sb.seq;
		pr_debug("set version = %llu", c->sb.version);
	}

	ca->set = c;
	ca->set->cache[ca->sb.nr_this_dev] = ca;
	c->cache_by_alloc[c->caches_loaded++] = ca;

	if (c->caches_loaded == c->sb.nr_in_set)
		run_cache_set(c);

	return NULL;
err:
	bch_cache_set_unregister(c);
	return err;
}
| 1635 | |
| 1636 | /* Cache device */ |
| 1637 | |
| 1638 | void bch_cache_release(struct kobject *kobj) |
| 1639 | { |
| 1640 | struct cache *ca = container_of(kobj, struct cache, kobj); |
| 1641 | |
| 1642 | if (ca->set) |
| 1643 | ca->set->cache[ca->sb.nr_this_dev] = NULL; |
| 1644 | |
| 1645 | bch_cache_allocator_exit(ca); |
| 1646 | |
| 1647 | bio_split_pool_free(&ca->bio_split_hook); |
| 1648 | |
| 1649 | if (ca->alloc_workqueue) |
| 1650 | destroy_workqueue(ca->alloc_workqueue); |
| 1651 | |
| 1652 | free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca))); |
| 1653 | kfree(ca->prio_buckets); |
| 1654 | vfree(ca->buckets); |
| 1655 | |
| 1656 | free_heap(&ca->heap); |
| 1657 | free_fifo(&ca->unused); |
| 1658 | free_fifo(&ca->free_inc); |
| 1659 | free_fifo(&ca->free); |
| 1660 | |
| 1661 | if (ca->sb_bio.bi_inline_vecs[0].bv_page) |
| 1662 | put_page(ca->sb_bio.bi_io_vec[0].bv_page); |
| 1663 | |
| 1664 | if (!IS_ERR_OR_NULL(ca->bdev)) { |
| 1665 | blk_sync_queue(bdev_get_queue(ca->bdev)); |
| 1666 | blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); |
| 1667 | } |
| 1668 | |
| 1669 | kfree(ca); |
| 1670 | module_put(THIS_MODULE); |
| 1671 | } |
| 1672 | |
| 1673 | static int cache_alloc(struct cache_sb *sb, struct cache *ca) |
| 1674 | { |
| 1675 | size_t free; |
| 1676 | struct bucket *b; |
| 1677 | |
| 1678 | if (!ca) |
| 1679 | return -ENOMEM; |
| 1680 | |
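|      | /* Pin the module while this cache exists; the matching module_put()
|      |  * is in bch_cache_release(). */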
| 1681 | __module_get(THIS_MODULE); |
| 1682 | kobject_init(&ca->kobj, &bch_cache_ktype); |
| 1683 | |
| 1684 | memcpy(&ca->sb, sb, sizeof(struct cache_sb)); |
| 1685 | |
| 1686 | INIT_LIST_HEAD(&ca->discards); |
| 1687 | |
| 1688 | bio_init(&ca->sb_bio); |
| 1689 | ca->sb_bio.bi_max_vecs = 1; |
| 1690 | ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs; |
| 1691 | |
| 1692 | bio_init(&ca->journal.bio); |
| 1693 | ca->journal.bio.bi_max_vecs = 8; |
| 1694 | ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs; |
| 1695 | |
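|      | /*
|      |  * Size the free list at roughly 1/512th of the bucket count, but at
|      |  * least enough to cover the prio buckets; free_inc and unused get 4x
|      |  * that, and the heap 8x.
|      |  */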
| 1696 | free = roundup_pow_of_two(ca->sb.nbuckets) >> 9; |
| 1697 | free = max_t(size_t, free, (prio_buckets(ca) + 8) * 2); |
| 1698 | |
| 1699 | if (!init_fifo(&ca->free, free, GFP_KERNEL) || |
| 1700 | !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || |
| 1701 | !init_fifo(&ca->unused, free << 2, GFP_KERNEL) || |
| 1702 | !init_heap(&ca->heap, free << 3, GFP_KERNEL) || |
| 1703 | !(ca->buckets = vmalloc(sizeof(struct bucket) * |
| 1704 | ca->sb.nbuckets)) || |
| 1705 | !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) * |
| 1706 | 2, GFP_KERNEL)) || |
| 1707 | !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) || |
| 1708 | !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) || |
| 1709 | bio_split_pool_init(&ca->bio_split_hook)) |
| 1710 | goto err; |
| 1711 | |
| 1712 | ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); |
| 1713 | |
| 1714 | memset(ca->buckets, 0, ca->sb.nbuckets * sizeof(struct bucket)); |
| 1715 | for_each_bucket(b, ca) |
| 1716 | atomic_set(&b->pin, 0); |
| 1717 | |
| 1718 | if (bch_cache_allocator_init(ca)) |
| 1719 | goto err; |
| 1720 | |
| 1721 | return 0; |
| 1722 | err: |
| 1723 | kobject_put(&ca->kobj); |
| 1724 | return -ENOMEM; |
| 1725 | } |
| 1726 | |
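|      | /*
|      |  * Build a struct cache from a freshly read superblock and try to attach
|      |  * it to a cache set. Only an allocation failure in cache_alloc() is
|      |  * reported to the caller; once the kobject exists, its destructor owns
|      |  * all cleanup, so later failures still return NULL.
|      |  */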
| 1727 | static const char *register_cache(struct cache_sb *sb, struct page *sb_page, |
| 1728 | struct block_device *bdev, struct cache *ca) |
| 1729 | { |
| 1730 | char name[BDEVNAME_SIZE]; |
| 1731 | const char *err = "cannot allocate memory"; |
| 1732 | |
| 1733 | if (cache_alloc(sb, ca) != 0) |
| 1734 | return err; |
| 1735 | |
| 1736 | ca->sb_bio.bi_io_vec[0].bv_page = sb_page; |
| 1737 | ca->bdev = bdev; |
| 1738 | ca->bdev->bd_holder = ca; |
| 1739 | |
| 1740 | if (blk_queue_discard(bdev_get_queue(ca->bdev))) |
| 1741 | ca->discard = CACHE_DISCARD(&ca->sb); |
| 1742 | |
| 1743 | err = "error creating kobject"; |
| 1744 | if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) |
| 1745 | goto err; |
| 1746 | |
| 1747 | err = register_cache_set(ca); |
| 1748 | if (err) |
| 1749 | goto err; |
| 1750 | |
| 1751 | pr_info("registered cache device %s", bdevname(bdev, name)); |
| 1752 | |
| 1753 | return NULL; |
| 1754 | err: |
| 1755 | kobject_put(&ca->kobj); |
| 1756 | pr_info("error opening %s: %s", bdevname(bdev, name), err); |
| 1757 | /* Return NULL instead of an error because kobject_put() cleans
| 1758 | * everything up.
| 1759 | */
| 1760 | return NULL; |
| 1761 | } |
| 1762 | |
| 1763 | /* Global interfaces/init */ |
| 1764 | |
| 1765 | static ssize_t register_bcache(struct kobject *, struct kobj_attribute *, |
| 1766 | const char *, size_t); |
| 1767 | |
| 1768 | kobj_attribute_write(register, register_bcache); |
| 1769 | kobj_attribute_write(register_quiet, register_bcache); |
| 1770 | |
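|      | /*
|      |  * Write handler for /sys/fs/bcache/register (and register_quiet): parse
|      |  * a device path, read its superblock, and register it as either a
|      |  * backing device or a cache device. Typical usage from userspace, with
|      |  * /dev/sdb standing in for a formatted device:
|      |  *
|      |  *	echo /dev/sdb > /sys/fs/bcache/register
|      |  */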
| 1771 | static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, |
| 1772 | const char *buffer, size_t size) |
| 1773 | { |
| 1774 | ssize_t ret = size; |
| 1775 | const char *err = "cannot allocate memory"; |
| 1776 | char *path = NULL; |
| 1777 | struct cache_sb *sb = NULL; |
| 1778 | struct block_device *bdev = NULL; |
| 1779 | struct page *sb_page = NULL; |
| 1780 | |
| 1781 | if (!try_module_get(THIS_MODULE)) |
| 1782 | return -EBUSY; |
| 1783 | |
| 1784 | mutex_lock(&bch_register_lock); |
| 1785 | |
| 1786 | if (!(path = kstrndup(buffer, size, GFP_KERNEL)) || |
| 1787 | !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL))) |
| 1788 | goto err; |
| 1789 | |
| 1790 | err = "failed to open device"; |
| 1791 | bdev = blkdev_get_by_path(strim(path), |
| 1792 | FMODE_READ|FMODE_WRITE|FMODE_EXCL, |
| 1793 | sb); |
| 1794 | if (bdev == ERR_PTR(-EBUSY)) |
| 1795 | err = "device busy"; |
| 1796 | |
| 1797 | if (IS_ERR(bdev) || |
| 1798 | set_blocksize(bdev, 4096)) |
| 1799 | goto err; |
| 1800 | |
| 1801 | err = read_super(sb, bdev, &sb_page); |
| 1802 | if (err) |
| 1803 | goto err_close; |
| 1804 | |
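|      | /* The superblock version distinguishes backing devices from cache
|      |  * devices. */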
| 1805 | if (sb->version == CACHE_BACKING_DEV) { |
| 1806 | struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); |
| 1807 | |
| 1808 | err = register_bdev(sb, sb_page, bdev, dc); |
| 1809 | } else { |
| 1810 | struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); |
| 1811 | |
| 1812 | err = register_cache(sb, sb_page, bdev, ca); |
| 1813 | } |
| 1814 | |
| 1815 | if (err) { |
| 1816 | /* register_(bdev|cache) will only return an error if they |
| 1817 | * didn't get far enough to create the kobject - if they did, |
| 1818 | * the kobject destructor will do this cleanup. |
| 1819 | */ |
| 1820 | put_page(sb_page); |
| 1821 | err_close: |
| 1822 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); |
| 1823 | err: |
| 1824 | if (attr != &ksysfs_register_quiet) |
| 1825 | pr_info("error opening %s: %s", path, err); |
| 1826 | ret = -EINVAL; |
| 1827 | } |
| 1828 | |
| 1829 | kfree(sb); |
| 1830 | kfree(path); |
| 1831 | mutex_unlock(&bch_register_lock); |
| 1832 | module_put(THIS_MODULE); |
| 1833 | return ret; |
| 1834 | } |
| 1835 | |
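|      | /*
|      |  * Reboot notifier: on shutdown, stop every cache set and backing device
|      |  * so they close cleanly, waiting up to two seconds for them to go away.
|      |  */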
| 1836 | static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) |
| 1837 | { |
| 1838 | if (code == SYS_DOWN || |
| 1839 | code == SYS_HALT || |
| 1840 | code == SYS_POWER_OFF) { |
| 1841 | DEFINE_WAIT(wait); |
| 1842 | unsigned long start = jiffies; |
| 1843 | bool stopped = false; |
| 1844 | |
| 1845 | struct cache_set *c, *tc; |
| 1846 | struct cached_dev *dc, *tdc; |
| 1847 | |
| 1848 | mutex_lock(&bch_register_lock); |
| 1849 | |
| 1850 | if (list_empty(&bch_cache_sets) && |
| 1851 | list_empty(&uncached_devices)) |
| 1852 | goto out; |
| 1853 | |
| 1854 | pr_info("Stopping all devices:"); |
| 1855 | |
| 1856 | list_for_each_entry_safe(c, tc, &bch_cache_sets, list) |
| 1857 | bch_cache_set_stop(c); |
| 1858 | |
| 1859 | list_for_each_entry_safe(dc, tdc, &uncached_devices, list) |
| 1860 | bcache_device_stop(&dc->disk); |
| 1861 | |
| 1862 | /* Open-coded wait loop: poll until all devices have stopped or two seconds pass */
| 1863 | while (1) { |
| 1864 | long timeout = start + 2 * HZ - jiffies; |
| 1865 | |
| 1866 | stopped = list_empty(&bch_cache_sets) && |
| 1867 | list_empty(&uncached_devices); |
| 1868 | |
| 1869 | if (timeout < 0 || stopped) |
| 1870 | break; |
| 1871 | |
| 1872 | prepare_to_wait(&unregister_wait, &wait, |
| 1873 | TASK_UNINTERRUPTIBLE); |
| 1874 | |
| 1875 | mutex_unlock(&bch_register_lock); |
| 1876 | schedule_timeout(timeout); |
| 1877 | mutex_lock(&bch_register_lock); |
| 1878 | } |
| 1879 | |
| 1880 | finish_wait(&unregister_wait, &wait); |
| 1881 | |
| 1882 | if (stopped) |
| 1883 | pr_info("All devices stopped"); |
| 1884 | else |
| 1885 | pr_notice("Timeout waiting for devices to be closed"); |
| 1886 | out: |
| 1887 | mutex_unlock(&bch_register_lock); |
| 1888 | } |
| 1889 | |
| 1890 | return NOTIFY_DONE; |
| 1891 | } |
| 1892 | |
| 1893 | static struct notifier_block reboot = { |
| 1894 | .notifier_call = bcache_reboot, |
| 1895 | .priority = INT_MAX, /* before any real devices */ |
| 1896 | }; |
| 1897 | |
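|      | /*
|      |  * Tear down in reverse order of bcache_init(); the NULL checks make it
|      |  * safe to call from a partially failed init.
|      |  */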
| 1898 | static void bcache_exit(void) |
| 1899 | { |
| 1900 | bch_debug_exit(); |
| 1901 | bch_writeback_exit(); |
| 1902 | bch_request_exit(); |
| 1903 | bch_btree_exit(); |
| 1904 | if (bcache_kobj) |
| 1905 | kobject_put(bcache_kobj); |
| 1906 | if (bcache_wq) |
| 1907 | destroy_workqueue(bcache_wq); |
| 1908 | unregister_blkdev(bcache_major, "bcache"); |
| 1909 | unregister_reboot_notifier(&reboot); |
| 1910 | } |
| 1911 | |
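|      | /*
|      |  * Module init: set up the register/register_quiet sysfs files under
|      |  * /sys/fs/bcache and bring up the subsystems; any failure after
|      |  * register_blkdev() unwinds through bcache_exit().
|      |  */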
| 1912 | static int __init bcache_init(void) |
| 1913 | { |
| 1914 | static const struct attribute *files[] = { |
| 1915 | &ksysfs_register.attr, |
| 1916 | &ksysfs_register_quiet.attr, |
| 1917 | NULL |
| 1918 | }; |
| 1919 | |
| 1920 | mutex_init(&bch_register_lock); |
| 1921 | init_waitqueue_head(&unregister_wait); |
| 1922 | register_reboot_notifier(&reboot); |
| 1923 | closure_debug_init();
| 1924 |
| 1925 | bcache_major = register_blkdev(0, "bcache"); |
| 1926 | if (bcache_major < 0) |
| 1927 | return bcache_major; |
| 1928 | |
| 1929 | if (!(bcache_wq = create_workqueue("bcache")) || |
| 1930 | !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || |
| 1931 | sysfs_create_files(bcache_kobj, files) || |
| 1932 | bch_btree_init() || |
| 1933 | bch_request_init() || |
| 1934 | bch_writeback_init() || |
| 1935 | bch_debug_init(bcache_kobj)) |
| 1936 | goto err; |
| 1937 | |
| 1938 | return 0; |
| 1939 | err: |
| 1940 | bcache_exit(); |
| 1941 | return -ENOMEM; |
| 1942 | } |
| 1943 | |
| 1944 | module_exit(bcache_exit); |
| 1945 | module_init(bcache_init); |