/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_H
#define _BCACHE_H

/*
 * SOME HIGH LEVEL CODE DOCUMENTATION:
 *
 * Bcache mostly works with cache sets, cache devices, and backing devices.
 *
 * Support for multiple cache devices hasn't quite been finished off yet, but
 * it's about 95% plumbed through. A cache set and its cache devices are sort
 * of like an md raid array and its component devices. Most of the code doesn't
 * care about individual cache devices; the main abstraction is the cache set.
 *
 * Multiple cache devices are intended to give us the ability to mirror dirty
 * cached data and metadata, without mirroring clean cached data.
 *
 * Backing devices are different, in that they have a lifetime independent of a
 * cache set. When you register a newly formatted backing device it'll come up
 * in passthrough mode, and then you can attach and detach a backing device from
 * a cache set at runtime - while it's mounted and in use. Detaching implicitly
 * invalidates any cached data for that backing device.
 *
 * A cache set can have multiple (many) backing devices attached to it.
 *
 * There are also flash only volumes - this is the reason for the distinction
 * between struct cached_dev and struct bcache_device. A flash only volume
 * works much like a bcache device that has a backing device, except the
 * "cached" data is always dirty. The end result is that we get thin
 * provisioning with very little additional code.
 *
 * Flash only volumes work but they're not production ready because the moving
 * garbage collector needs more work. More on that later.
 *
 * BUCKETS/ALLOCATION:
 *
 * Bcache is primarily designed for caching, which means that in normal
 * operation all of our available space will be allocated. Thus, we need an
 * efficient way of deleting things from the cache so we can write new things
 * to it.
 *
 * To do this, we first divide the cache device up into buckets. A bucket is the
 * unit of allocation; they're typically around 1 mb - anywhere from 128k to 2M+
 * works efficiently.
 *
 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
 * it. The gens and priorities for all the buckets are stored contiguously and
 * packed on disk (in a linked list of buckets - aside from the superblock, all
 * of bcache's metadata is stored in buckets).
 *
 * The priority is used to implement an LRU. We reset a bucket's priority when
 * we allocate it or on a cache hit, and every so often we decrement the
 * priority of each bucket. It could be used to implement something more
 * sophisticated, if anyone ever gets around to it.
 *
 * The generation is used for invalidating buckets. Each pointer also has an 8
 * bit generation embedded in it; for a pointer to be considered valid, its gen
 * must match the gen of the bucket it points into. Thus, to reuse a bucket all
 * we have to do is increment its gen (and write its new gen to disk; we batch
 * this up).
 *
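 * As a hedged sketch (the helper name below is hypothetical; the real logic
 * lives in the allocator and the prio_write() path), reusing a bucket
 * amounts to:
 *
 *	b->gen++;                 (every old pointer into b is now stale)
 *	schedule_prio_write(ca);  (persist the new gen, batched with others)
 *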
 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
 * contain metadata (including btree nodes).
 *
 * THE BTREE:
 *
 * Bcache is in large part designed around the btree.
 *
 * At a high level, the btree is just an index of key -> ptr tuples.
 *
 * Keys represent extents, and thus have a size field. Keys also have a variable
 * number of pointers attached to them (potentially zero, which is handy for
 * invalidating the cache).
 *
 * The key itself is an inode:offset pair. The inode number corresponds to a
 * backing device or a flash only volume. The offset is the ending offset of the
 * extent within the inode - not the starting offset; this makes lookups
 * slightly more convenient.
 *
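 * For example (a sketch, not the exact search code - btree_search_ge() is a
 * hypothetical helper): to find the extent covering sector S of inode I, look
 * up the first key >= (I, S + 1). Because keys are indexed by their ending
 * offset, that single successor lookup lands on the only candidate extent:
 *
 *	k = btree_search_ge(b, &KEY(I, S + 1, 0));
 *	(the extent covers S iff KEY_START(k) <= S < KEY_OFFSET(k))
 *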
 * Pointers contain the cache device id, the offset on that device, and an 8 bit
 * generation number. More on the gen later.
 *
 * Index lookups are not fully abstracted - cache lookups in particular are
 * still somewhat mixed in with the btree code, but things are headed in that
 * direction.
 *
 * Updates are fairly well abstracted, though. There are two different ways of
 * updating the btree: insert and replace.
 *
 * BTREE_INSERT will just take a list of keys and insert them into the btree -
 * overwriting (possibly only partially) any extents they overlap with. This is
 * used to update the index after a write.
 *
 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
 * overwriting a key that matches another given key. This is used for inserting
 * data into the cache after a cache miss, for background writeback, and for
 * the moving garbage collector.
 *
 * There is no "delete" operation; deleting things from the index is
 * accomplished either by invalidating pointers (by incrementing a bucket's
 * gen) or by inserting a key with 0 pointers - which will overwrite anything
 * previously present at that location in the index.
 *
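 * The cmpxchg analogy can be made concrete with a sketch (names abridged;
 * the real interface passes a replace key through the insert path): on a
 * cache miss we remember what the index said, fill the cache from the
 * backing device, then insert the new key only if the index is unchanged -
 * if a racing write already updated it, our freshly cached data is stale
 * and the insert is simply dropped:
 *
 *	old = index lookup for (inode, offset);
 *	read from the backing device, write the data to the cache;
 *	BTREE_REPLACE(new key, old);	(succeeds iff "old" still matches)
 *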
 * This means that there are always stale/invalid keys in the btree. They're
 * filtered out by the code that iterates through a btree node, and removed when
 * a btree node is rewritten.
 *
 * BTREE NODES:
 *
 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
 * free smaller than a bucket - so, that's how big our btree nodes are.
 *
 * (If buckets are really big we'll only use part of the bucket for a btree node
 * - no less than 1/4th - but a bucket still contains no more than a single
 * btree node. I'd actually like to change this, but for now we rely on the
 * bucket's gen for deleting btree nodes when we rewrite/split a node.)
 *
 * Anyways, btree nodes are big - big enough to be inefficient with a textbook
 * btree implementation.
 *
 * The way this is solved is that btree nodes are internally log structured; we
 * can append new keys to an existing btree node without rewriting it. This
 * means each set of keys we write is sorted, but the node is not.
 *
 * We maintain this log structure in memory - keeping 1Mb of keys sorted would
 * be expensive, and we have to distinguish between the keys we have written and
 * the keys we haven't. So to do a lookup in a btree node, we have to search
 * each sorted set. But we do merge written sets together lazily, so the cost of
 * these extra searches is quite low (normally most of the keys in a btree node
 * will be in one big set, and then there'll be one or two sets that are much
 * smaller).
 *
 * This log structure makes bcache's btree more of a hybrid between a
 * conventional btree and a compacting data structure, with some of the
 * advantages of both.
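 *
 * A lookup in a node is therefore roughly the following sketch (the real
 * code builds a btree_iter that merge-sorts the per-set results as it goes):
 *
 *	for each sorted set s in the node
 *		binary search s, remembering the best match so far
 *	return the best match across all sets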
 *
 * GARBAGE COLLECTION:
 *
 * We can't just invalidate any bucket - it might contain dirty data or
 * metadata. If it once contained dirty data, other writes might overwrite it
 * later, leaving no valid pointers into that bucket in the index.
 *
 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 * It also counts how much valid data each bucket currently contains, so that
 * allocation can reuse buckets sooner when they've been mostly overwritten.
 *
 * It also does some things that are really internal to the btree
 * implementation. If a btree node contains pointers that are stale by more than
 * some threshold, it rewrites the btree node to avoid the bucket's generation
 * wrapping around. It also merges adjacent btree nodes if they're empty enough.
 *
 * THE JOURNAL:
 *
 * Bcache's journal is not necessary for consistency; we always strictly
 * order metadata writes so that the btree and everything else is consistent on
 * disk in the event of an unclean shutdown, and in fact bcache had writeback
 * caching (with recovery from unclean shutdown) before journalling was
 * implemented.
 *
 * Rather, the journal is purely a performance optimization; we can't complete a
 * write until we've updated the index on disk, otherwise the cache would be
 * inconsistent in the event of an unclean shutdown. This means that without the
 * journal, on random write workloads we constantly have to update all the leaf
 * nodes in the btree, and those writes will be mostly empty (appending at most
 * a few keys each) - highly inefficient in terms of the amount of metadata
 * written, and it puts more strain on the various btree resorting/compacting
 * code.
 *
 * The journal is just a log of keys we've inserted; on startup we just reinsert
 * all the keys in the open journal entries. That means that when we're updating
 * a node in the btree, we can wait until a 4k block of keys fills up before
 * writing them out.
 *
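 * Completing a write then only requires its journal entry to be on disk; as
 * a sketch (function names here are hypothetical), the write path becomes:
 *
 *	journal_add(keys); wait for the journal write to complete;
 *	ack the write to the caller;
 *	update the btree leaf lazily, once ~4k of keys have accumulated
 *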
 * For simplicity, we only journal updates to leaf nodes; updates to parent
 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
 * the complexity to deal with journalling them (in particular, journal replay)
 * - updates to non leaf nodes just happen synchronously (see btree_split()).
 */

#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__

#include <linux/bcache.h>
#include <linux/bio.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>

#include "bset.h"
#include "util.h"
#include "closure.h"

struct bucket {
	atomic_t	pin;
	uint16_t	prio;
	uint8_t		gen;
	uint8_t		last_gc; /* Most out of date gen in the btree */
	uint16_t	gc_mark; /* Bitfield used by GC. See below for field layout */
};

/*
 * I'd use bitfields for these, but I don't trust the compiler not to screw me
 * as multiple threads touch struct bucket without locking
 */

BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE	1
#define GC_MARK_DIRTY		2
#define GC_MARK_METADATA	3
#define GC_SECTORS_USED_SIZE	13
#define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
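
/*
 * BITMASK() generates accessor pairs - e.g. GC_MARK(b) and SET_GC_MARK(b, v)
 * for the field declared above. As a sketch of typical use (the real call
 * sites live in the gc code and run under bucket_lock):
 *
 *	SET_GC_MARK(b, GC_MARK_METADATA);	(b now holds metadata)
 *	if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
 *		(the bucket may be invalidated and reused)
 */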

#include "journal.h"
#include "stats.h"
struct search;
struct btree;
struct keybuf;

struct keybuf_key {
	struct rb_node		node;
	BKEY_PADDED(key);
	void			*private;
};

struct keybuf {
	struct bkey		last_scanned;
	spinlock_t		lock;

	/*
	 * Beginning and end of range in rb tree - so that we can skip taking
	 * lock and checking the rb tree when we need to check for overlapping
	 * keys.
	 */
	struct bkey		start;
	struct bkey		end;

	struct rb_root		keys;

#define KEYBUF_NR		500
	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
};

struct bcache_device {
	struct closure		cl;

	struct kobject		kobj;

	struct cache_set	*c;
	unsigned int		id;
#define BCACHEDEVNAME_SIZE	12
	char			name[BCACHEDEVNAME_SIZE];

	struct gendisk		*disk;

	unsigned long		flags;
#define BCACHE_DEV_CLOSING		0
#define BCACHE_DEV_DETACHING		1
#define BCACHE_DEV_UNLINK_DONE		2
#define BCACHE_DEV_WB_RUNNING		3
#define BCACHE_DEV_RATE_DW_RUNNING	4
	unsigned int		nr_stripes;
	unsigned int		stripe_size;
	atomic_t		*stripe_sectors_dirty;
	unsigned long		*full_dirty_stripes;

	struct bio_set		bio_split;

	unsigned int		data_csum:1;

	int (*cache_miss)(struct btree *b, struct search *s,
			  struct bio *bio, unsigned int sectors);
	int (*ioctl)(struct bcache_device *d, fmode_t mode,
		     unsigned int cmd, unsigned long arg);
};

struct io {
	/* Used to track sequential IO so it can be skipped */
	struct hlist_node	hash;
	struct list_head	lru;

	unsigned long		jiffies;
	unsigned int		sequential;
	sector_t		last;
};

enum stop_on_failure {
	BCH_CACHED_DEV_STOP_AUTO = 0,
	BCH_CACHED_DEV_STOP_ALWAYS,
	BCH_CACHED_DEV_STOP_MODE_MAX,
};

struct cached_dev {
	struct list_head	list;
	struct bcache_device	disk;
	struct block_device	*bdev;

	struct cache_sb		sb;
	struct bio		sb_bio;
	struct bio_vec		sb_bv[1];
	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

	/* Refcount on the cache set. Always nonzero when we're caching. */
	refcount_t		count;
	struct work_struct	detach;

	/*
	 * Device might not be running if it's dirty and the cache set hasn't
	 * shown up yet.
	 */
	atomic_t		running;

	/*
	 * Writes take a shared lock from start to finish; scanning for dirty
	 * data to refill the rb tree requires an exclusive lock.
	 */
	struct rw_semaphore	writeback_lock;

	/*
	 * Nonzero, and writeback has a refcount (d->count), iff there is dirty
	 * data in the cache. Protected by writeback_lock; must have a
	 * shared lock to set and exclusive lock to clear.
	 */
	atomic_t		has_dirty;

	struct bch_ratelimit	writeback_rate;
	struct delayed_work	writeback_rate_update;

	/* Limit number of writeback bios in flight */
	struct semaphore	in_flight;
	struct task_struct	*writeback_thread;
	struct workqueue_struct	*writeback_write_wq;

	struct keybuf		writeback_keys;

	struct task_struct	*status_update_thread;
	/*
	 * Order the write-half of writeback operations strongly in dispatch
	 * order. (Maintain LBA order; don't allow reads completing out of
	 * order to re-order the writes...)
	 */
	struct closure_waitlist	writeback_ordering_wait;
	atomic_t		writeback_sequence_next;

	/* For tracking sequential IO */
#define RECENT_IO_BITS	7
#define RECENT_IO	(1 << RECENT_IO_BITS)
	struct io		io[RECENT_IO];
	struct hlist_head	io_hash[RECENT_IO + 1];
	struct list_head	io_lru;
	spinlock_t		io_lock;

	struct cache_accounting	accounting;

	/* The rest of this all shows up in sysfs */
	unsigned int		sequential_cutoff;
	unsigned int		readahead;

	unsigned int		io_disable:1;
	unsigned int		verify:1;
	unsigned int		bypass_torture_test:1;

	unsigned int		partial_stripes_expensive:1;
	unsigned int		writeback_metadata:1;
	unsigned int		writeback_running:1;
	unsigned char		writeback_percent;
	unsigned int		writeback_delay;

	uint64_t		writeback_rate_target;
	int64_t			writeback_rate_proportional;
	int64_t			writeback_rate_integral;
	int64_t			writeback_rate_integral_scaled;
	int32_t			writeback_rate_change;

	unsigned int		writeback_rate_update_seconds;
	unsigned int		writeback_rate_i_term_inverse;
	unsigned int		writeback_rate_p_term_inverse;
	unsigned int		writeback_rate_minimum;

	enum stop_on_failure	stop_when_cache_set_failed;
#define DEFAULT_CACHED_DEV_ERROR_LIMIT	64
	atomic_t		io_errors;
	unsigned int		error_limit;
	unsigned int		offline_seconds;

	char			backing_dev_name[BDEVNAME_SIZE];
};

enum alloc_reserve {
	RESERVE_BTREE,
	RESERVE_PRIO,
	RESERVE_MOVINGGC,
	RESERVE_NONE,
	RESERVE_NR,
};

struct cache {
	struct cache_set	*set;
	struct cache_sb		sb;
	struct bio		sb_bio;
	struct bio_vec		sb_bv[1];

	struct kobject		kobj;
	struct block_device	*bdev;

	struct task_struct	*alloc_thread;

	struct closure		prio;
	struct prio_set		*disk_buckets;

	/*
	 * When allocating new buckets, prio_write() gets first dibs - since we
	 * may not be able to allocate at all without writing priorities and
	 * gens. prio_last_buckets[] contains the last buckets we wrote
	 * priorities to (so gc can mark them as metadata), prio_buckets[]
	 * contains the buckets allocated for the next prio write.
	 */
	uint64_t		*prio_buckets;
	uint64_t		*prio_last_buckets;

	/*
	 * free: Buckets that are ready to be used
	 *
	 * free_inc: Incoming buckets - these are buckets that currently have
	 * cached data in them, and we can't reuse them until after we write
	 * their new gen to disk. After prio_write() finishes writing the new
	 * gens/prios, they'll be moved to the free list (and possibly discarded
	 * in the process)
	 */
	DECLARE_FIFO(long, free)[RESERVE_NR];
	DECLARE_FIFO(long, free_inc);

	size_t			fifo_last_bucket;

	/* Allocation stuff: */
	struct bucket		*buckets;

	DECLARE_HEAP(struct bucket *, heap);

	/*
	 * If nonzero, we know we aren't going to find any buckets to invalidate
	 * until a gc finishes - otherwise we could pointlessly burn a ton of
	 * cpu
	 */
	unsigned int		invalidate_needs_gc;

	bool			discard; /* Get rid of? */

	struct journal_device	journal;

	/* The rest of this all shows up in sysfs */
#define IO_ERROR_SHIFT		20
	atomic_t		io_errors;
	atomic_t		io_count;

	atomic_long_t		meta_sectors_written;
	atomic_long_t		btree_sectors_written;
	atomic_long_t		sectors_written;

	char			cache_dev_name[BDEVNAME_SIZE];
};

struct gc_stat {
	size_t			nodes;
	size_t			nodes_pre;
	size_t			key_bytes;

	size_t			nkeys;
	uint64_t		data;	/* sectors */
	unsigned int		in_use;	/* percent */
};

/*
 * Flag bits, for how the cache set is shutting down, and what phase it's at:
 *
 * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
 * all the backing devices first (their cached data gets invalidated, and they
 * won't automatically reattach).
 *
 * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
 * we'll continue to run normally for a while with CACHE_SET_STOPPING set (i.e.
 * flushing dirty data).
 *
 * CACHE_SET_RUNNING means all cache devices have been registered and journal
 * replay is complete.
 *
 * CACHE_SET_IO_DISABLE is set when bcache is stopping the whole cache set; all
 * external and internal I/O should be denied when this flag is set.
 */
#define CACHE_SET_UNREGISTERING	0
#define CACHE_SET_STOPPING	1
#define CACHE_SET_RUNNING	2
#define CACHE_SET_IO_DISABLE	3

struct cache_set {
	struct closure		cl;

	struct list_head	list;
	struct kobject		kobj;
	struct kobject		internal;
	struct dentry		*debug;
	struct cache_accounting	accounting;

	unsigned long		flags;
	atomic_t		idle_counter;
	atomic_t		at_max_writeback_rate;

	struct cache_sb		sb;

	struct cache		*cache[MAX_CACHES_PER_SET];
	struct cache		*cache_by_alloc[MAX_CACHES_PER_SET];
	int			caches_loaded;

	struct bcache_device	**devices;
	unsigned int		devices_max_used;
	atomic_t		attached_dev_nr;
	struct list_head	cached_devs;
	uint64_t		cached_dev_sectors;
	atomic_long_t		flash_dev_dirty_sectors;
	struct closure		caching;

	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

	mempool_t		search;
	mempool_t		bio_meta;
	struct bio_set		bio_split;

	/* For the btree cache */
	struct shrinker		shrink;

	/* For the btree cache and anything allocation related */
	struct mutex		bucket_lock;

	/* log2(bucket_size), in sectors */
	unsigned short		bucket_bits;

	/* log2(block_size), in sectors */
	unsigned short		block_bits;

	/*
	 * Default number of pages for a new btree node - may be less than a
	 * full bucket
	 */
	unsigned int		btree_pages;

	/*
	 * Lists of struct btrees; lru is the list for structs that have memory
	 * allocated for actual btree node, freed is for structs that do not.
	 *
	 * We never free a struct btree, except on shutdown - we just put it on
	 * the btree_cache_freed list and reuse it later. This simplifies the
	 * code, and it doesn't cost us much memory as the memory usage is
	 * dominated by buffers that hold the actual btree node data and those
	 * can be freed - and the number of struct btrees allocated is
	 * effectively bounded.
	 *
	 * btree_cache_freeable effectively is a small cache - we use it because
	 * high order page allocations can be rather expensive, and it's quite
	 * common to delete and allocate btree nodes in quick succession. It
	 * should never grow past ~2-3 nodes in practice.
	 */
	struct list_head	btree_cache;
	struct list_head	btree_cache_freeable;
	struct list_head	btree_cache_freed;

	/* Number of elements in btree_cache + btree_cache_freeable lists */
	unsigned int		btree_cache_used;

	/*
	 * If we need to allocate memory for a new btree node and that
	 * allocation fails, we can cannibalize another node in the btree cache
	 * to satisfy the allocation - lock to guarantee only one thread does
	 * this at a time:
	 */
	wait_queue_head_t	btree_cache_wait;
	struct task_struct	*btree_cache_alloc_lock;
	spinlock_t		btree_cannibalize_lock;

	/*
	 * When we free a btree node, we increment the gen of the bucket the
	 * node is in - but we can't rewrite the prios and gens until we
	 * finished whatever it is we were doing, otherwise after a crash the
	 * btree node would be freed but for say a split, we might not have the
	 * pointers to the new nodes inserted into the btree yet.
	 *
	 * This is a refcount that blocks prio_write() until the new keys are
	 * written.
	 */
	atomic_t		prio_blocked;
	wait_queue_head_t	bucket_wait;

	/*
	 * For any bio we don't skip we subtract the number of sectors from
	 * rescale; when it hits 0 we rescale all the bucket priorities.
	 */
	atomic_t		rescale;
	/*
	 * Used by GC to identify whether any front-end I/O is in flight.
	 */
	atomic_t		search_inflight;
	/*
	 * When we invalidate buckets, we use both the priority and the amount
	 * of good data to determine which buckets to reuse first - to weight
	 * those together consistently we keep track of the smallest nonzero
	 * priority of any bucket.
	 */
	uint16_t		min_prio;

	/*
	 * max(gen - last_gc) for all buckets. When it gets too big we have to
	 * gc to keep gens from wrapping around.
	 */
	uint8_t			need_gc;
	struct gc_stat		gc_stats;
	size_t			nbuckets;
	size_t			avail_nbuckets;

	struct task_struct	*gc_thread;
	/* Where in the btree gc currently is */
	struct bkey		gc_done;

	/*
	 * For automatic garbage collection after writeback completes, this
	 * variable is used as a bit field:
	 * - 0000 0001b (BCH_ENABLE_AUTO_GC): enable gc after writeback
	 * - 0000 0010b (BCH_DO_AUTO_GC):     do gc after writeback
	 * This is an optimization for write requests that follow writeback;
	 * without it, the read hit rate drops because clean data in the cache
	 * gets discarded. Unless the user explicitly enables it via sysfs, it
	 * stays disabled.
	 */
#define BCH_ENABLE_AUTO_GC	1
#define BCH_DO_AUTO_GC		2
	uint8_t			gc_after_writeback;

	/*
	 * The allocation code needs gc_mark in struct bucket to be correct, but
	 * it's not while a gc is in progress. Protected by bucket_lock.
	 */
	int			gc_mark_valid;

	/* Counts how many sectors bio_insert has added to the cache */
	atomic_t		sectors_to_gc;
	wait_queue_head_t	gc_wait;

	struct keybuf		moving_gc_keys;
	/* Number of moving GC bios in flight */
	struct semaphore	moving_in_flight;

	struct workqueue_struct	*moving_gc_wq;

	struct btree		*root;

#ifdef CONFIG_BCACHE_DEBUG
	struct btree		*verify_data;
	struct bset		*verify_ondisk;
	struct mutex		verify_lock;
#endif

	unsigned int		nr_uuids;
	struct uuid_entry	*uuids;
	BKEY_PADDED(uuid_bucket);
	struct closure		uuid_write;
	struct semaphore	uuid_write_mutex;

	/*
	 * A btree node on disk could have too many bsets for an iterator to fit
	 * on the stack - have to dynamically allocate them.
	 * bch_cache_set_alloc() will make sure the pool can allocate iterators
	 * equipped with enough room to host
	 * (sb.bucket_size / sb.block_size)
	 * btree_iter_sets, which is more than the static MAX_BSETS.
	 */
	mempool_t		fill_iter;

	struct bset_sort_state	sort;

	/* List of buckets we're currently writing data to */
	struct list_head	data_buckets;
	spinlock_t		data_bucket_lock;

	struct journal		journal;

#define CONGESTED_MAX		1024
	unsigned int		congested_last_us;
	atomic_t		congested;

	/* The rest of this all shows up in sysfs */
	unsigned int		congested_read_threshold_us;
	unsigned int		congested_write_threshold_us;

	struct time_stats	btree_gc_time;
	struct time_stats	btree_split_time;
	struct time_stats	btree_read_time;

	atomic_long_t		cache_read_races;
	atomic_long_t		writeback_keys_done;
	atomic_long_t		writeback_keys_failed;

	atomic_long_t		reclaim;
	atomic_long_t		reclaimed_journal_buckets;
	atomic_long_t		flush_write;

	enum {
		ON_ERROR_UNREGISTER,
		ON_ERROR_PANIC,
	}			on_error;
#define DEFAULT_IO_ERROR_LIMIT	8
	unsigned int		error_limit;
	unsigned int		error_decay;

	unsigned short		journal_delay_ms;
	bool			expensive_debug_checks;
	unsigned int		verify:1;
	unsigned int		key_merging_disabled:1;
	unsigned int		gc_always_rewrite:1;
	unsigned int		shrinker_disabled:1;
	unsigned int		copy_gc_enabled:1;

#define BUCKET_HASH_BITS	12
	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
};

struct bbio {
	unsigned int		submit_time_us;
	union {
		struct bkey	key;
		uint64_t	_pad[3];
		/*
		 * We only need pad = 3 here because we only ever carry around a
		 * single pointer - i.e. the pointer we're doing io to/from.
		 */
	};
	struct bio		bio;
};

#define BTREE_PRIO		USHRT_MAX
#define INITIAL_PRIO		32768U

#define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
#define btree_blocks(b)							\
	((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits))

#define btree_default_blocks(c)						\
	((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))

#define bucket_pages(c)		((c)->sb.bucket_size / PAGE_SECTORS)
#define bucket_bytes(c)		((c)->sb.bucket_size << 9)
#define block_bytes(c)		((c)->sb.block_size << 9)

#define prios_per_bucket(c)				\
	((bucket_bytes(c) - sizeof(struct prio_set)) /	\
	 sizeof(struct bucket_disk))
#define prio_buckets(c)					\
	DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))
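
/*
 * A worked example of the arithmetic (all sizes here are assumptions for
 * illustration, not taken from the on-disk format headers): with 1 MB
 * buckets, bucket_bytes() = 1048576; if struct prio_set's header is 32 bytes
 * and struct bucket_disk is 3 bytes, then
 * prios_per_bucket() = (1048576 - 32) / 3 = 349514, so a cache with two
 * million buckets needs prio_buckets() = DIV_ROUND_UP(2000000, 349514) = 6
 * buckets to hold all the prios/gens.
 */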

static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
{
	return s >> c->bucket_bits;
}

static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
{
	return ((sector_t) b) << c->bucket_bits;
}

static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
{
	return s & (c->sb.bucket_size - 1);
}

static inline struct cache *PTR_CACHE(struct cache_set *c,
				      const struct bkey *k,
				      unsigned int ptr)
{
	return c->cache[PTR_DEV(k, ptr)];
}

static inline size_t PTR_BUCKET_NR(struct cache_set *c,
				   const struct bkey *k,
				   unsigned int ptr)
{
	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
}

static inline struct bucket *PTR_BUCKET(struct cache_set *c,
					const struct bkey *k,
					unsigned int ptr)
{
	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
}

static inline uint8_t gen_after(uint8_t a, uint8_t b)
{
	uint8_t r = a - b;

	return r > 128U ? 0 : r;
}

static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
				unsigned int i)
{
	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
}

static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
				 unsigned int i)
{
	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
}

/* Btree key macros */

/*
 * This is used for various on disk data structures - cache_sb, prio_set, bset,
 * jset: The checksum is _always_ the first 8 bytes of these structs
 */
#define csum_set(i)							\
	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
		  ((void *) bset_bkey_last(i)) -			\
		  (((void *) (i)) + sizeof(uint64_t)))
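
/*
 * Typical use is a sketch like the following: checksum everything after the
 * 8 byte csum field itself and store the result in that field before writing,
 *
 *	i->csum = csum_set(i);
 *
 * then recompute csum_set(i) and compare after reading the struct back.
 */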

/* Error handling macros */

#define btree_bug(b, ...)						\
do {									\
	if (bch_cache_set_error((b)->c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define cache_bug(c, ...)						\
do {									\
	if (bch_cache_set_error(c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define btree_bug_on(cond, b, ...)					\
do {									\
	if (cond)							\
		btree_bug(b, __VA_ARGS__);				\
} while (0)

#define cache_bug_on(cond, c, ...)					\
do {									\
	if (cond)							\
		cache_bug(c, __VA_ARGS__);				\
} while (0)

#define cache_set_err_on(cond, c, ...)					\
do {									\
	if (cond)							\
		bch_cache_set_error(c, __VA_ARGS__);			\
} while (0)

/* Looping macros */

#define for_each_cache(ca, cs, iter)					\
	for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)

#define for_each_bucket(b, ca)						\
	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)

static inline void cached_dev_put(struct cached_dev *dc)
{
	if (refcount_dec_and_test(&dc->count))
		schedule_work(&dc->detach);
}

static inline bool cached_dev_get(struct cached_dev *dc)
{
	if (!refcount_inc_not_zero(&dc->count))
		return false;

	/* Paired with the mb in cached_dev_attach */
	smp_mb__after_atomic();
	return true;
}

/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree (last_gc).
 */

static inline uint8_t bucket_gc_gen(struct bucket *b)
{
	return b->gen - b->last_gc;
}

#define BUCKET_GC_GEN_MAX	96U

#define kobj_attribute_write(n, fn)					\
	static struct kobj_attribute ksysfs_##n = __ATTR(n, 0200, NULL, fn)

#define kobj_attribute_rw(n, show, store)				\
	static struct kobj_attribute ksysfs_##n =			\
		__ATTR(n, 0600, show, store)

static inline void wake_up_allocators(struct cache_set *c)
{
	struct cache *ca;
	unsigned int i;

	for_each_cache(ca, c, i)
		wake_up_process(ca->alloc_thread);
}

static inline void closure_bio_submit(struct cache_set *c,
				      struct bio *bio,
				      struct closure *cl)
{
	closure_get(cl);
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
	}
	generic_make_request(bio);
}

/*
 * Prevent the kthread from exiting directly, and make sure it is still alive
 * when kthread_stop() is called to stop it. If a kthread might be stopped by
 * the CACHE_SET_IO_DISABLE bit being set, wait_for_kthread_stop() is
 * necessary before the kthread returns.
 */
static inline void wait_for_kthread_stop(void)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
}

/* Forward declarations */

void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
void bch_count_io_errors(struct cache *ca, blk_status_t error,
			 int is_read, const char *m);
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      blk_status_t error, const char *m);
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    blk_status_t error, const char *m);
void bch_bbio_free(struct bio *bio, struct cache_set *c);
struct bio *bch_bbio_alloc(struct cache_set *c);

void __bch_submit_bbio(struct bio *bio, struct cache_set *c);
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned int ptr);

uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
void bch_rescale_priorities(struct cache_set *c, int sectors);

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b);

void __bch_bucket_free(struct cache *ca, struct bucket *b);
void bch_bucket_free(struct cache_set *c, struct bkey *k);

long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			   struct bkey *k, int n, bool wait);
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			 struct bkey *k, int n, bool wait);
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
		       unsigned int sectors, unsigned int write_point,
		       unsigned int write_prio, bool wait);
bool bch_cached_dev_error(struct cached_dev *dc);

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);

int bch_prio_write(struct cache *ca, bool wait);
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);

extern struct workqueue_struct *bcache_wq;
extern struct workqueue_struct *bch_journal_wq;
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;

extern struct kobj_type bch_cached_dev_ktype;
extern struct kobj_type bch_flash_dev_ktype;
extern struct kobj_type bch_cache_set_ktype;
extern struct kobj_type bch_cache_set_internal_ktype;
extern struct kobj_type bch_cache_ktype;

void bch_cached_dev_release(struct kobject *kobj);
void bch_flash_dev_release(struct kobject *kobj);
void bch_cache_set_release(struct kobject *kobj);
void bch_cache_release(struct kobject *kobj);

int bch_uuid_write(struct cache_set *c);
void bcache_write_super(struct cache_set *c);

int bch_flash_dev_create(struct cache_set *c, uint64_t size);

int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
			  uint8_t *set_uuid);
void bch_cached_dev_detach(struct cached_dev *dc);
int bch_cached_dev_run(struct cached_dev *dc);
void bcache_device_stop(struct bcache_device *d);

void bch_cache_set_unregister(struct cache_set *c);
void bch_cache_set_stop(struct cache_set *c);

struct cache_set *bch_cache_set_alloc(struct cache_sb *sb);
void bch_btree_cache_free(struct cache_set *c);
int bch_btree_cache_alloc(struct cache_set *c);
void bch_moving_init_cache_set(struct cache_set *c);
int bch_open_buckets_alloc(struct cache_set *c);
void bch_open_buckets_free(struct cache_set *c);

int bch_cache_allocator_start(struct cache *ca);

void bch_debug_exit(void);
void bch_debug_init(void);
void bch_request_exit(void);
int bch_request_init(void);

#endif /* _BCACHE_H */