/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <trace/events/bcache.h>

/* Rate limiting */

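/*
 * PD controller for the background writeback rate: compare the current dirty
 * sector count against a target derived from writeback_percent (scaled by
 * this device's share of cached_dev_sectors), mix in an EWMA-smoothed
 * derivative of the dirty count, and nudge writeback_rate.rate by a
 * proportional step.  The rate is clamped to [1, NSEC_PER_MSEC] and is not
 * raised further while the device is already failing to keep up with the
 * current rate.
 */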
static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/* PD controller */

	int change = 0;
	int64_t error;
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t derivative = dirty - dc->disk.sectors_dirty_last;

	dc->disk.sectors_dirty_last = dirty;

	derivative *= dc->writeback_rate_d_term;
	derivative = clamp(derivative, -dirty, dirty);

	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
			      dc->writeback_rate_d_smooth, 0);

	/* Avoid divide by zero */
	if (!target)
		goto out;

	error = div64_s64((dirty + derivative - target) << 8, target);

	change = div_s64((dc->writeback_rate.rate * error) >> 8,
			 dc->writeback_rate_p_term_inverse);

	/* Don't increase writeback rate if the device isn't keeping up */
	if (change > 0 &&
	    time_after64(local_clock(),
			 dc->writeback_rate.next + 10 * NSEC_PER_MSEC))
		change = 0;

	dc->writeback_rate.rate =
		clamp_t(int64_t, dc->writeback_rate.rate + change,
			1, NSEC_PER_MSEC);
out:
	dc->writeback_rate_derivative = derivative;
	dc->writeback_rate_change = change;
	dc->writeback_rate_target = target;
}

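/*
 * Periodic wrapper (delayed work): recompute the rate under writeback_lock
 * when there is dirty data and a writeback_percent threshold is set, then
 * re-arm itself writeback_rate_update_seconds later.
 */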
static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}

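/*
 * Ask the rate limiter how long to wait before issuing the next writeback IO
 * covering @sectors.  No throttling while the device is detaching or when
 * writeback_percent is 0 (flush-everything mode); the returned delay
 * (in jiffies, per bch_next_delay()) is capped at HZ.
 */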
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	uint64_t ret;

	if (atomic_read(&dc->disk.detaching) ||
	    !dc->writeback_percent)
		return 0;

	ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);

	return min_t(uint64_t, ret, HZ);
}

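/*
 * State for one dirty key being written back: a closure driving the
 * read-from-cache / write-to-backing-device sequence, the owning cached_dev,
 * and the bio (its biovecs are allocated inline after the struct in
 * read_dirty()).
 */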
struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	struct bio		bio;
};

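/*
 * (Re)initialize the bio embedded in a dirty_io for the key being written
 * back: sized from the key and using the inline biovecs.  When
 * writeback_percent is 0 the IO is tagged with idle priority so the flush
 * doesn't compete with foreground traffic.
 */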
static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio);
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_size		= KEY_SIZE(&w->key) << 9;
	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
	bio->bi_private		= w;
	bio->bi_io_vec		= bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}

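/*
 * Runs after the write to the backing device completes: free the bounce
 * pages, then (unless the write errored and dirty_endio() already cleared
 * KEY_DIRTY) insert a non-dirty version of the key with BTREE_REPLACE, so the
 * dirty bit is only cleared if the key wasn't overwritten while the writeback
 * IO was in flight.  Collisions are counted rather than retried.  Finally the
 * keybuf entry is dropped and an in_flight slot released.
 */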
static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, &io->bio, i)
		__free_page(bv->bv_page);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		unsigned i;
		struct btree_op op;
		struct keylist keys;

		bch_btree_op_init_stack(&op);
		bch_keylist_init(&keys);

		op.type = BTREE_REPLACE;
		bkey_copy(&op.replace, &w->key);

		SET_KEY_DIRTY(&w->key, false);
		bch_keylist_add(&keys, &w->key);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		bch_btree_insert(&op, dc->disk.c, &keys);
		closure_sync(&op.cl);

		if (op.insert_collision)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(op.insert_collision
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}

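/*
 * Shared bio completion for both legs of the writeback IO.  On error the
 * in-memory copy of the key has its dirty bit cleared, which makes
 * write_dirty_finish() skip the btree update and leaves the on-disk key dirty
 * so it will be picked up again later.
 */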
static void dirty_endio(struct bio *bio, int error)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (error)
		SET_KEY_DIRTY(&w->key, false);

	closure_put(&io->cl);
}

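/*
 * Second half of the writeback IO: reuse the bio (now holding the data read
 * from the cache) and write it to the matching offset on the backing device,
 * then continue to write_dirty_finish() from workqueue context once the
 * write completes.
 */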
static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;

	dirty_init(w);
	io->bio.bi_rw		= WRITE;
	io->bio.bi_sector	= KEY_START(&w->key);
	io->bio.bi_bdev		= io->dc->bdev;
	io->bio.bi_end_io	= dirty_endio;

	closure_bio_submit(&io->bio, cl, &io->dc->disk);

	continue_at(cl, write_dirty_finish, system_wq);
}

static void read_dirty_endio(struct bio *bio, int error)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    error, "reading dirty data from cache");

	dirty_endio(bio, error);
}

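/*
 * First half of the writeback IO: submit the read of dirty data from the
 * cache device, then continue to write_dirty() from workqueue context once
 * the read completes.
 */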
static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(&io->bio, cl, &io->dc->disk);

	continue_at(cl, write_dirty, system_wq);
}

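/*
 * Main writeback work loop: walk the keybuf of dirty keys, allocate a
 * dirty_io (plus bounce pages) per key and kick off the read from the cache.
 * The loop paces itself with writeback_delay(), only actually sleeping when
 * the next key isn't sequential with the last one or the accumulated delay
 * exceeds 50ms, and bounds the number of outstanding IOs with dc->in_flight.
 * Before returning it waits for all IO started here to finish so keybuf
 * slots are free for the next refill.
 */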
static void read_dirty(struct cached_dev *dc)
{
	unsigned delay = 0;
	struct keybuf_key *w;
	struct dirty_io *io;
	struct closure cl;

	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	while (!kthread_should_stop()) {
		try_to_freeze();

		w = bch_keybuf_next(&dc->writeback_keys);
		if (!w)
			break;

		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

		if (KEY_START(&w->key) != dc->last_read ||
		    jiffies_to_msecs(delay) > 50)
			while (!kthread_should_stop() && delay)
				delay = schedule_timeout_interruptible(delay);

		dc->last_read	= KEY_OFFSET(&w->key);

		io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->dc		= dc;

		dirty_init(w);
		io->bio.bi_sector	= PTR_OFFSET(&w->key, 0);
		io->bio.bi_bdev		= PTR_CACHE(dc->disk.c,
						    &w->key, 0)->bdev;
		io->bio.bi_rw		= READ;
		io->bio.bi_end_io	= read_dirty_endio;

		if (bio_alloc_pages(&io->bio, GFP_KERNEL))
			goto err_free;

		trace_bcache_writeback(&w->key);

		down(&dc->in_flight);
		closure_call(&io->cl, read_dirty_submit, NULL, &cl);

		delay = writeback_delay(dc, KEY_SIZE(&w->key));
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}

/* Scan for dirty data */

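/*
 * Update the per-stripe dirty sector counters for a device.  @nr_sectors may
 * be negative (sectors becoming clean); the range is split across stripe
 * boundaries as needed.
 */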
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset;
	uint64_t stripe = offset;

	if (!d)
		return;

	do_div(stripe, d->stripe_size);

	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		atomic_add(s, d->stripe_sectors_dirty + stripe);
		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	return KEY_DIRTY(k);
}

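/*
 * Keybuf predicate used when partial stripe writes are expensive (typically
 * striped RAID backing devices): only select dirty keys that overlap at
 * least one completely dirty stripe, so writeback tends to issue full-stripe
 * writes.
 */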
static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
{
	uint64_t stripe = KEY_START(k);
	unsigned nr_sectors = KEY_SIZE(k);
	struct cached_dev *dc = container_of(buf, struct cached_dev,
					     writeback_keys);

	if (!KEY_DIRTY(k))
		return false;

	do_div(stripe, dc->disk.stripe_size);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) ==
		    dc->disk.stripe_size)
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

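/*
 * Refill the writeback keybuf by scanning the btree from last_scanned towards
 * the end of this device's key space.  If partial stripe writes are expensive
 * and at least one stripe is completely dirty, only keys from full stripes
 * are taken.  Returns true if the scan started at the beginning and reached
 * the end, i.e. the whole device was searched and the caller may conclude it
 * is clean once the keybuf empties.
 */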
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	bool searched_from_start = false;
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);

	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
		buf->last_scanned = KEY(dc->disk.id, 0, 0);
		searched_from_start = true;
	}

	if (dc->partial_stripes_expensive) {
		uint64_t i;

		for (i = 0; i < dc->disk.nr_stripes; i++)
			if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
			    dc->disk.stripe_size)
				goto full_stripes;

		goto normal_refill;
full_stripes:
		searched_from_start = false; /* not searching entire btree */
		bch_refill_keybuf(dc->disk.c, buf, &end,
				  dirty_full_stripe_pred);
	} else {
normal_refill:
		bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
	}

	return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
}

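/*
 * Per-device writeback thread.  Sleeps while there is no dirty data or while
 * background writeback is disabled (and the device isn't detaching).  Each
 * pass refills the keybuf; if a full index scan found nothing dirty,
 * has_dirty is cleared and the backing device superblock is marked
 * BDEV_STATE_CLEAN.  The dirty keys found are then written back via
 * read_dirty(), and after a complete pass the thread pauses for
 * writeback_delay seconds.
 */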
static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	bool searched_full_index;

	while (!kthread_should_stop()) {
		down_write(&dc->writeback_lock);
		if (!atomic_read(&dc->has_dirty) ||
		    (!atomic_read(&dc->disk.detaching) &&
		     !dc->writeback_running)) {
			up_write(&dc->writeback_lock);
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				return 0;

			try_to_freeze();
			schedule();
			continue;
		}

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			cached_dev_put(dc);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
		}

		up_write(&dc->writeback_lock);

		bch_ratelimit_reset(&dc->writeback_rate);
		read_dirty(dc);

		if (searched_full_index) {
			unsigned delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !atomic_read(&dc->disk.detaching))
				delay = schedule_timeout_interruptible(delay);
		}
	}

	return 0;
}

/* Init */

static int sectors_dirty_init_fn(struct btree_op *op, struct btree *b,
				 struct bkey *k)
{
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	return MAP_CONTINUE;
}

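/*
 * Walk the btree once for this device's keys (via sectors_dirty_init_fn
 * above) and count whatever is already dirty, so the per-stripe counters and
 * the rate controller's notion of dirty data start from the real on-disk
 * state rather than zero.
 */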
void bch_sectors_dirty_init(struct cached_dev *dc)
{
	struct btree_op op;

	bch_btree_op_init_stack(&op);
	op.inode = dc->disk.id;

	bch_btree_map_keys(&op, dc->disk.c, &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn, 0);
}

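/*
 * Set up writeback state for a cached device: defaults for the rate
 * controller, the writeback kthread (created here but not woken until
 * writeback actually has work elsewhere), and the delayed work that
 * periodically updates the rate.
 */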
int bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= true;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	dc->writeback_rate.rate		= 1024;

	dc->writeback_rate_update_seconds = 30;
	dc->writeback_rate_d_term	= 16;
	dc->writeback_rate_p_term_inverse = 64;
	dc->writeback_rate_d_smooth	= 8;

	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread))
		return PTR_ERR(dc->writeback_thread);

	set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);

	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	return 0;
}